move getDrives to onedrive/api, add mock (#2941)

Moves the drive fetcher and pagers into the
onedrive/api package so that they can be easily
reused by other packages.  Also moves the mocks
out of the local test files and into a mock
package nested in the api package.

First of a couple of PRs fixing up SharePoint
testing issues.
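
As a rough sketch (not part of this diff), a downstream package can now build a
pager and fetch drives through the api package alone. The adapter/service setup
below mirrors the integration-test code further down; the credential parameters
and the `siteDrives` helper are illustrative.

```go
package example

import (
	"context"

	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/connector/onedrive/api"
)

// siteDrives is a hypothetical caller that lists every drive in a site
// via the relocated onedrive/api package.
func siteDrives(
	ctx context.Context,
	tenantID, clientID, clientSecret, siteID string,
) ([]models.Driveable, error) {
	adpt, err := graph.CreateAdapter(tenantID, clientID, clientSecret)
	if err != nil {
		return nil, err
	}

	pager := api.NewSiteDrivePager(graph.NewService(adpt), siteID, []string{"id", "name"})

	// retry=true enables the timeout-retry loop; 3 matches maxDrivesRetries.
	return api.GetAllDrives(ctx, pager, true, 3)
}
```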

---

#### Does this PR need a docs update or release note?

- [x] No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Test Plan

- [x] 💚 E2E
Committed by Keepers on 2023-03-27 14:13:15 -06:00 (via GitHub)
parent ddeaaf2ff2
commit 17cb3db524
13 changed files with 494 additions and 512 deletions

View File

@@ -4,8 +4,10 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/connector/graph/api"
abstractions "github.com/microsoft/kiota-abstractions-go"
msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
@@ -14,7 +16,7 @@ import (
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/graph/api"
"github.com/alcionai/corso/src/pkg/logger"
)
func getValues[T any](l api.PageLinker) ([]T, error) {
@@ -201,71 +203,75 @@ func (p *siteDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error)
return getValues[models.Driveable](l)
}
// GetDriveIDByName is a helper function to retrieve the M365ID of a site drive.
// Returns "" if the folder is not within the drive.
// Dependency: Requires "name" and "id" to be part of the given options
func (p *siteDrivePager) GetDriveIDByName(ctx context.Context, driveName string) (string, error) {
var empty string
// ---------------------------------------------------------------------------
// Drive Paging
// ---------------------------------------------------------------------------
// DrivePager pages through different types of drive owners
type DrivePager interface {
GetPage(context.Context) (api.PageLinker, error)
SetNext(nextLink string)
ValuesIn(api.PageLinker) ([]models.Driveable, error)
}
// GetAllDrives fetches all drives for the given pager
func GetAllDrives(
ctx context.Context,
pager DrivePager,
retry bool,
maxRetryCount int,
) ([]models.Driveable, error) {
drives := []models.Driveable{}
if !retry {
maxRetryCount = 0
}
// Loop through all pages returned by Graph API.
for {
resp, err := p.builder.Get(ctx, p.options)
if err != nil {
return empty, graph.Stack(ctx, err)
}
var (
err error
page api.PageLinker
)
for _, entry := range resp.GetValue() {
if ptr.Val(entry.GetName()) == driveName {
return ptr.Val(entry.GetId()), nil
// Retry Loop for Drive retrieval. Request can timeout
for i := 0; i <= maxRetryCount; i++ {
page, err = pager.GetPage(ctx)
if err != nil {
if clues.HasLabel(err, graph.LabelsMysiteNotFound) {
logger.Ctx(ctx).Infof("resource owner does not have a drive")
return make([]models.Driveable, 0), nil // no license or drives.
}
if graph.IsErrTimeout(err) && i < maxRetryCount {
time.Sleep(time.Duration(3*(i+1)) * time.Second)
continue
}
return nil, graph.Wrap(ctx, err, "retrieving drives")
}
}
link, ok := ptr.ValOK(resp.GetOdataNextLink())
if !ok {
// No error encountered, break the retry loop so we can extract results
// and see if there's another page to fetch.
break
}
p.builder = mssites.NewItemDrivesRequestBuilder(link, p.gs.Adapter())
}
return empty, nil
}
// GetFolderIDByName is a helper function to retrieve the M365ID of a folder within a site document library.
// Returns "" if the folder is not within the drive
func (p *siteDrivePager) GetFolderIDByName(ctx context.Context, driveID, folderName string) (string, error) {
var empty string
// *msdrives.ItemRootChildrenRequestBuilder
builder := p.gs.Client().DrivesById(driveID).Root().Children()
option := &msdrives.ItemRootChildrenRequestBuilderGetRequestConfiguration{
QueryParameters: &msdrives.ItemRootChildrenRequestBuilderGetQueryParameters{
Select: []string{"id", "name", "folder"},
},
}
for {
resp, err := builder.Get(ctx, option)
tmp, err := pager.ValuesIn(page)
if err != nil {
return empty, graph.Stack(ctx, err)
return nil, graph.Wrap(ctx, err, "extracting drives from response")
}
for _, entry := range resp.GetValue() {
if entry.GetFolder() == nil {
continue
}
drives = append(drives, tmp...)
if ptr.Val(entry.GetName()) == folderName {
return ptr.Val(entry.GetId()), nil
}
}
link, ok := ptr.ValOK(resp.GetOdataNextLink())
if !ok {
nextLink := ptr.Val(page.GetOdataNextLink())
if len(nextLink) == 0 {
break
}
builder = msdrives.NewItemRootChildrenRequestBuilder(link, p.gs.Adapter())
pager.SetNext(nextLink)
}
return empty, nil
logger.Ctx(ctx).Debugf("retrieved %d valid drives", len(drives))
return drives, nil
}

View File

@@ -55,30 +55,3 @@ func (suite *OneDriveAPISuite) TestCreatePagerAndGetPage() {
assert.NoError(t, err, clues.ToCore(err))
assert.NotNil(t, a)
}
func (suite *OneDriveAPISuite) TestGetDriveIDByName() {
ctx, flush := tester.NewContext()
defer flush()
t := suite.T()
siteID := tester.M365SiteID(t)
pager := api.NewSiteDrivePager(suite.service, siteID, []string{"id", "name"})
id, err := pager.GetDriveIDByName(ctx, "Documents")
assert.NoError(t, err, clues.ToCore(err))
assert.NotEmpty(t, id)
}
func (suite *OneDriveAPISuite) TestGetDriveFolderByName() {
ctx, flush := tester.NewContext()
defer flush()
t := suite.T()
siteID := tester.M365SiteID(t)
pager := api.NewSiteDrivePager(suite.service, siteID, []string{"id", "name"})
id, err := pager.GetDriveIDByName(ctx, "Documents")
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, id)
_, err = pager.GetFolderIDByName(ctx, id, "folder")
assert.NoError(t, err, clues.ToCore(err))
}

View File

@@ -0,0 +1,56 @@
package mock
import (
"context"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/connector/graph/api"
)
type PageLinker struct {
Link *string
}
func (pl *PageLinker) GetOdataNextLink() *string {
return pl.Link
}
type PagerResult struct {
Drives []models.Driveable
NextLink *string
Err error
}
type DrivePager struct {
ToReturn []PagerResult
GetIdx int
}
func (p *DrivePager) GetPage(context.Context) (api.PageLinker, error) {
if len(p.ToReturn) <= p.GetIdx {
return nil, clues.New("ToReturn index out of bounds")
}
idx := p.GetIdx
p.GetIdx++
return &PageLinker{p.ToReturn[idx].NextLink}, p.ToReturn[idx].Err
}
func (p *DrivePager) SetNext(string) {}
func (p *DrivePager) ValuesIn(api.PageLinker) ([]models.Driveable, error) {
idx := p.GetIdx
if idx > 0 {
// Return values lag by one since we increment in GetPage().
idx--
}
if len(p.ToReturn) <= idx {
return nil, clues.New("ToReturn index out of bounds")
}
return p.ToReturn[idx].Drives, nil
}
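
For reference, a minimal sketch of how this mock might drive a unit test of
`GetAllDrives` (the next-link URL and drive fixtures below are hypothetical):

```go
package mock_test

import (
	"context"
	"testing"

	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/alcionai/corso/src/internal/connector/onedrive/api"
	"github.com/alcionai/corso/src/internal/connector/onedrive/api/mock"
)

func TestGetAllDrives_Sketch(t *testing.T) {
	link := "https://example.com/page2" // hypothetical next-page link

	pager := &mock.DrivePager{
		ToReturn: []mock.PagerResult{
			// a populated NextLink makes GetAllDrives fetch another page
			{Drives: []models.Driveable{models.NewDrive()}, NextLink: &link},
			// a nil NextLink ends the paging loop
			{Drives: []models.Driveable{models.NewDrive()}},
		},
	}

	drives, err := api.GetAllDrives(context.Background(), pager, true, 3)
	require.NoError(t, err)
	assert.Len(t, drives, 2)
}
```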

View File

@@ -15,6 +15,7 @@ import (
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/observe"
@@ -80,7 +81,7 @@ type Collections struct {
servicer graph.Servicer,
resourceOwner string,
fields []string,
) (drivePager, error)
) (api.DrivePager, error)
itemPagerFunc func(
servicer graph.Servicer,
driveID, link string,
@@ -273,7 +274,7 @@ func (c *Collections) Get(
return nil, nil, graph.Stack(ctx, err)
}
drives, err := drives(ctx, pager, true)
drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
if err != nil {
return nil, nil, err
}

View File

@@ -17,6 +17,8 @@ import (
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/connector/graph"
gapi "github.com/alcionai/corso/src/internal/connector/graph/api"
"github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/connector/onedrive/api/mock"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester"
@@ -2208,11 +2210,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
servicer graph.Servicer,
resourceOwner string,
fields []string,
) (drivePager, error) {
return &mockDrivePager{
toReturn: []pagerResult{
) (api.DrivePager, error) {
return &mock.DrivePager{
ToReturn: []mock.PagerResult{
{
drives: test.drives,
Drives: test.drives,
},
},
}, nil

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/alcionai/clues"
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
@@ -23,7 +22,7 @@ import (
var errFolderNotFound = clues.New("folder not found")
const (
getDrivesRetries = 3
maxDrivesRetries = 3
// nextLinkKey is used to find the next link in a paged
// graph response
@@ -44,18 +43,12 @@ type DeltaUpdate struct {
Reset bool
}
type drivePager interface {
GetPage(context.Context) (gapi.PageLinker, error)
SetNext(nextLink string)
ValuesIn(gapi.PageLinker) ([]models.Driveable, error)
}
func PagerForSource(
source driveSource,
servicer graph.Servicer,
resourceOwner string,
fields []string,
) (drivePager, error) {
) (api.DrivePager, error) {
switch source {
case OneDriveSource:
return api.NewUserDrivePager(servicer, resourceOwner, fields), nil
@@ -66,69 +59,6 @@
}
}
func drives(
ctx context.Context,
pager drivePager,
retry bool,
) ([]models.Driveable, error) {
var (
numberOfRetries = getDrivesRetries
drives = []models.Driveable{}
)
if !retry {
numberOfRetries = 0
}
// Loop through all pages returned by Graph API.
for {
var (
err error
page gapi.PageLinker
)
// Retry Loop for Drive retrieval. Request can timeout
for i := 0; i <= numberOfRetries; i++ {
page, err = pager.GetPage(ctx)
if err != nil {
if clues.HasLabel(err, graph.LabelsMysiteNotFound) {
logger.Ctx(ctx).Infof("resource owner does not have a drive")
return make([]models.Driveable, 0), nil // no license or drives.
}
if graph.IsErrTimeout(err) && i < numberOfRetries {
time.Sleep(time.Duration(3*(i+1)) * time.Second)
continue
}
return nil, graph.Wrap(ctx, err, "retrieving drives")
}
// No error encountered, break the retry loop so we can extract results
// and see if there's another page to fetch.
break
}
tmp, err := pager.ValuesIn(page)
if err != nil {
return nil, graph.Wrap(ctx, err, "extracting drives from response")
}
drives = append(drives, tmp...)
nextLink := ptr.Val(page.GetOdataNextLink())
if len(nextLink) == 0 {
break
}
pager.SetNext(nextLink)
}
logger.Ctx(ctx).Debugf("retrieved %d valid drives", len(drives))
return drives, nil
}
// itemCollector functions collect the items found in a drive
type itemCollector func(
ctx context.Context,
@@ -350,11 +280,11 @@ func (op *Displayable) GetDisplayName() *string {
func GetAllFolders(
ctx context.Context,
gs graph.Servicer,
pager drivePager,
pager api.DrivePager,
prefix string,
errs *fault.Bus,
) ([]*Displayable, error) {
drives, err := drives(ctx, pager, true)
drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
if err != nil {
return nil, errors.Wrap(err, "getting OneDrive folders")
}

View File

@@ -16,7 +16,8 @@ import (
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/graph/api"
"github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/connector/onedrive/api/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
@@ -24,52 +25,6 @@ import (
"github.com/alcionai/corso/src/pkg/selectors"
)
type mockPageLinker struct {
link *string
}
func (pl *mockPageLinker) GetOdataNextLink() *string {
return pl.link
}
type pagerResult struct {
drives []models.Driveable
nextLink *string
err error
}
type mockDrivePager struct {
toReturn []pagerResult
getIdx int
}
func (p *mockDrivePager) GetPage(context.Context) (api.PageLinker, error) {
if len(p.toReturn) <= p.getIdx {
return nil, assert.AnError
}
idx := p.getIdx
p.getIdx++
return &mockPageLinker{p.toReturn[idx].nextLink}, p.toReturn[idx].err
}
func (p *mockDrivePager) SetNext(string) {}
func (p *mockDrivePager) ValuesIn(api.PageLinker) ([]models.Driveable, error) {
idx := p.getIdx
if idx > 0 {
// Return values lag by one since we increment in GetPage().
idx--
}
if len(p.toReturn) <= idx {
return nil, assert.AnError
}
return p.toReturn[idx].drives, nil
}
// Unit tests
type OneDriveUnitSuite struct {
tester.Suite
@@ -117,28 +72,28 @@ func (suite *OneDriveUnitSuite) TestDrives() {
resultDrives = append(resultDrives, d)
}
tooManyRetries := make([]pagerResult, 0, getDrivesRetries+1)
tooManyRetries := make([]mock.PagerResult, 0, maxDrivesRetries+1)
for i := 0; i < getDrivesRetries+1; i++ {
tooManyRetries = append(tooManyRetries, pagerResult{
err: context.DeadlineExceeded,
for i := 0; i < maxDrivesRetries+1; i++ {
tooManyRetries = append(tooManyRetries, mock.PagerResult{
Err: context.DeadlineExceeded,
})
}
table := []struct {
name string
pagerResults []pagerResult
pagerResults []mock.PagerResult
retry bool
expectedErr assert.ErrorAssertionFunc
expectedResults []models.Driveable
}{
{
name: "AllOneResultNilNextLink",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: resultDrives,
nextLink: nil,
err: nil,
Drives: resultDrives,
NextLink: nil,
Err: nil,
},
},
retry: false,
@@ -147,11 +102,11 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "AllOneResultEmptyNextLink",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: resultDrives,
nextLink: &emptyLink,
err: nil,
Drives: resultDrives,
NextLink: &emptyLink,
Err: nil,
},
},
retry: false,
@@ -160,16 +115,16 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "SplitResultsNilNextLink",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: resultDrives[:numDriveResults/2],
nextLink: &link,
err: nil,
Drives: resultDrives[:numDriveResults/2],
NextLink: &link,
Err: nil,
},
{
drives: resultDrives[numDriveResults/2:],
nextLink: nil,
err: nil,
Drives: resultDrives[numDriveResults/2:],
NextLink: nil,
Err: nil,
},
},
retry: false,
@@ -178,16 +133,16 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "SplitResultsEmptyNextLink",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: resultDrives[:numDriveResults/2],
nextLink: &link,
err: nil,
Drives: resultDrives[:numDriveResults/2],
NextLink: &link,
Err: nil,
},
{
drives: resultDrives[numDriveResults/2:],
nextLink: &emptyLink,
err: nil,
Drives: resultDrives[numDriveResults/2:],
NextLink: &emptyLink,
Err: nil,
},
},
retry: false,
@@ -196,16 +151,16 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "NonRetryableError",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: resultDrives,
nextLink: &link,
err: nil,
Drives: resultDrives,
NextLink: &link,
Err: nil,
},
{
drives: nil,
nextLink: nil,
err: assert.AnError,
Drives: nil,
NextLink: nil,
Err: assert.AnError,
},
},
retry: true,
@@ -214,11 +169,11 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "MySiteURLNotFound",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: nil,
nextLink: nil,
err: graph.Stack(ctx, mySiteURLNotFound),
Drives: nil,
NextLink: nil,
Err: graph.Stack(ctx, mySiteURLNotFound),
},
},
retry: true,
@@ -227,11 +182,11 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "MySiteNotFound",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: nil,
nextLink: nil,
err: graph.Stack(ctx, mySiteNotFound),
Drives: nil,
NextLink: nil,
Err: graph.Stack(ctx, mySiteNotFound),
},
},
retry: true,
@@ -240,21 +195,21 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "SplitResultsContextTimeoutWithRetries",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: resultDrives[:numDriveResults/2],
nextLink: &link,
err: nil,
Drives: resultDrives[:numDriveResults/2],
NextLink: &link,
Err: nil,
},
{
drives: nil,
nextLink: nil,
err: context.DeadlineExceeded,
Drives: nil,
NextLink: nil,
Err: context.DeadlineExceeded,
},
{
drives: resultDrives[numDriveResults/2:],
nextLink: &emptyLink,
err: nil,
Drives: resultDrives[numDriveResults/2:],
NextLink: &emptyLink,
Err: nil,
},
},
retry: true,
@@ -263,21 +218,21 @@ func (suite *OneDriveUnitSuite) TestDrives() {
},
{
name: "SplitResultsContextTimeoutNoRetries",
pagerResults: []pagerResult{
pagerResults: []mock.PagerResult{
{
drives: resultDrives[:numDriveResults/2],
nextLink: &link,
err: nil,
Drives: resultDrives[:numDriveResults/2],
NextLink: &link,
Err: nil,
},
{
drives: nil,
nextLink: nil,
err: context.DeadlineExceeded,
Drives: nil,
NextLink: nil,
Err: context.DeadlineExceeded,
},
{
drives: resultDrives[numDriveResults/2:],
nextLink: &emptyLink,
err: nil,
Drives: resultDrives[numDriveResults/2:],
NextLink: &emptyLink,
Err: nil,
},
},
retry: false,
@@ -287,11 +242,11 @@ func (suite *OneDriveUnitSuite) TestDrives() {
{
name: "TooManyRetries",
pagerResults: append(
[]pagerResult{
[]mock.PagerResult{
{
drives: resultDrives[:numDriveResults/2],
nextLink: &link,
err: nil,
Drives: resultDrives[:numDriveResults/2],
NextLink: &link,
Err: nil,
},
},
tooManyRetries...,
@@ -308,11 +263,11 @@ func (suite *OneDriveUnitSuite) TestDrives() {
ctx, flush := tester.NewContext()
defer flush()
pager := &mockDrivePager{
toReturn: test.pagerResults,
pager := &mock.DrivePager{
ToReturn: test.pagerResults,
}
drives, err := drives(ctx, pager, test.retry)
drives, err := api.GetAllDrives(ctx, pager, test.retry, maxDrivesRetries)
test.expectedErr(t, err, clues.ToCore(err))
assert.ElementsMatch(t, test.expectedResults, drives)
@@ -353,7 +308,7 @@ func (suite *OneDriveSuite) TestCreateGetDeleteFolder() {
pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil)
require.NoError(t, err, clues.ToCore(err))
drives, err := drives(ctx, pager, true)
drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, drives)

View File

@@ -15,6 +15,7 @@ import (
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/fault"
)
@@ -47,7 +48,7 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
pager, err := PagerForSource(OneDriveSource, suite.service, suite.user, nil)
require.NoError(t, err, clues.ToCore(err))
odDrives, err := drives(ctx, pager, true)
odDrives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
require.NoError(t, err, clues.ToCore(err))
// Test Requirement 1: Need a drive
require.Greaterf(t, len(odDrives), 0, "user %s does not have a drive", suite.user)

View File

@@ -140,8 +140,8 @@ func RestoreCollection(
metrics = support.CollectionMetrics{}
copyBuffer = make([]byte, copyBufferSize)
directory = dc.FullPath()
itemInfo details.ItemInfo
folderMetas = map[string]Metadata{}
el = errs.Local()
)
ctx, end := D.Span(ctx, "gc:oneDrive:restoreCollection", D.Label("path", directory))
@@ -186,16 +186,12 @@
drivePath,
restoreFolderElements,
colMeta,
permissionIDMappings,
)
permissionIDMappings)
if err != nil {
return metrics, folderMetas, permissionIDMappings, clues.Wrap(err, "creating folders for restore")
}
var (
el = errs.Local()
items = dc.Items(ctx, errs)
)
items := dc.Items(ctx, errs)
for {
if el.Failure() != nil {
@@ -217,129 +213,188 @@
continue
}
if backupVersion >= version.OneDrive1DataAndMetaFiles {
name := itemData.UUID()
itemInfo, skipped, err := restoreItem(
ctx,
dc,
backupVersion,
source,
service,
drivePath,
restoreFolderID,
copyBuffer,
folderMetas,
permissionIDMappings,
restorePerms,
itemData,
itemPath)
if strings.HasSuffix(name, DataFileSuffix) {
metrics.Objects++
metrics.Bytes += int64(len(copyBuffer))
var (
itemInfo details.ItemInfo
err error
)
if backupVersion < version.OneDrive6NameInMeta {
itemInfo, err = restoreV1File(
ctx,
source,
service,
drivePath,
dc,
restoreFolderID,
copyBuffer,
permissionIDMappings,
restorePerms,
itemData,
)
} else {
itemInfo, err = restoreV2File(
ctx,
source,
service,
drivePath,
dc,
restoreFolderID,
copyBuffer,
permissionIDMappings,
restorePerms,
itemData,
)
}
if err != nil {
el.AddRecoverable(err)
continue
}
err = deets.Add(
itemPath.String(),
itemPath.ShortRef(),
"",
"", // TODO: implement locationRef
true,
itemInfo)
if err != nil {
// Not critical enough to need to stop restore operation.
logger.Ctx(ctx).Infow("accounting for restored item", "error", err)
}
metrics.Successes++
} else if strings.HasSuffix(name, MetaFileSuffix) {
// Just skip this for the moment since we moved the code to the above
// item restore path. We haven't yet stopped fetching these items in
// RestoreOp, so we still need to handle them in some way.
continue
} else if strings.HasSuffix(name, DirMetaFileSuffix) {
// Only the version.OneDrive1DataAndMetaFiles needed to deserialize the
// permission for child folders here. Later versions can request
// permissions inline when processing the collection.
if !restorePerms || backupVersion >= version.OneDrive4DirIncludesPermissions {
continue
}
metaReader := itemData.ToReader()
defer metaReader.Close()
meta, err := getMetadata(metaReader)
if err != nil {
el.AddRecoverable(clues.Wrap(err, "getting directory metadata").WithClues(ctx))
continue
}
trimmedPath := strings.TrimSuffix(itemPath.String(), DirMetaFileSuffix)
folderMetas[trimmedPath] = meta
}
} else {
// skipped items don't get counted, but they can error
if !skipped {
metrics.Objects++
metrics.Bytes += int64(len(copyBuffer))
// No permissions stored at the moment for SharePoint
_, itemInfo, err = restoreData(
ctx,
service,
itemData.UUID(),
itemData,
drivePath.DriveID,
restoreFolderID,
copyBuffer,
source)
if err != nil {
el.AddRecoverable(err)
continue
}
err = deets.Add(
itemPath.String(),
itemPath.ShortRef(),
"",
"", // TODO: implement locationRef
true,
itemInfo)
if err != nil {
// Not critical enough to need to stop restore operation.
logger.Ctx(ctx).Infow("accounting for restored item", "error", err)
}
metrics.Successes++
}
if err != nil {
el.AddRecoverable(clues.Wrap(err, "restoring item"))
continue
}
if skipped {
logger.Ctx(ctx).With("item_path", itemPath).Debug("did not restore item")
continue
}
err = deets.Add(
itemPath.String(),
itemPath.ShortRef(),
"",
"", // TODO: implement locationRef
true,
itemInfo)
if err != nil {
// Not critical enough to need to stop restore operation.
logger.CtxErr(ctx, err).Infow("adding restored item to details")
}
metrics.Successes++
}
}
return metrics, folderMetas, permissionIDMappings, el.Failure()
}
// restores an item, according to correct backup version behavior.
// returns the item info, a bool (true = restore was skipped), and an error
func restoreItem(
ctx context.Context,
dc data.RestoreCollection,
backupVersion int,
source driveSource,
service graph.Servicer,
drivePath *path.DrivePath,
restoreFolderID string,
copyBuffer []byte,
folderMetas map[string]Metadata,
permissionIDMappings map[string]string,
restorePerms bool,
itemData data.Stream,
itemPath path.Path,
) (details.ItemInfo, bool, error) {
itemUUID := itemData.UUID()
if backupVersion < version.OneDrive1DataAndMetaFiles {
itemInfo, err := restoreV0File(
ctx,
source,
service,
drivePath,
restoreFolderID,
copyBuffer,
itemData)
if err != nil {
return details.ItemInfo{}, false, clues.Wrap(err, "v0 restore")
}
return itemInfo, false, nil
}
// only v1+ backups from this point on
if strings.HasSuffix(itemUUID, MetaFileSuffix) {
// Just skip this for the moment since we moved the code to the above
// item restore path. We haven't yet stopped fetching these items in
// RestoreOp, so we still need to handle them in some way.
return details.ItemInfo{}, true, nil
}
if strings.HasSuffix(itemUUID, DirMetaFileSuffix) {
// Only the version.OneDrive1DataAndMetaFiles needed to deserialize the
// permission for child folders here. Later versions can request
// permissions inline when processing the collection.
if !restorePerms || backupVersion >= version.OneDrive4DirIncludesPermissions {
return details.ItemInfo{}, true, nil
}
metaReader := itemData.ToReader()
defer metaReader.Close()
meta, err := getMetadata(metaReader)
if err != nil {
return details.ItemInfo{}, true, clues.Wrap(err, "getting directory metadata").WithClues(ctx)
}
trimmedPath := strings.TrimSuffix(itemPath.String(), DirMetaFileSuffix)
folderMetas[trimmedPath] = meta
return details.ItemInfo{}, true, nil
}
// only items with DataFileSuffix from this point on
if backupVersion < version.OneDrive6NameInMeta {
itemInfo, err := restoreV1File(
ctx,
source,
service,
drivePath,
dc,
restoreFolderID,
copyBuffer,
permissionIDMappings,
restorePerms,
itemData)
if err != nil {
return details.ItemInfo{}, false, clues.Wrap(err, "v1 restore")
}
return itemInfo, false, nil
}
// only v6+ backups from this point on
itemInfo, err := restoreV6File(
ctx,
source,
service,
drivePath,
dc,
restoreFolderID,
copyBuffer,
permissionIDMappings,
restorePerms,
itemData)
if err != nil {
return details.ItemInfo{}, false, clues.Wrap(err, "v6 restore")
}
return itemInfo, false, nil
}
func restoreV0File(
ctx context.Context,
source driveSource,
service graph.Servicer,
drivePath *path.DrivePath,
restoreFolderID string,
copyBuffer []byte,
itemData data.Stream,
) (details.ItemInfo, error) {
_, itemInfo, err := restoreData(
ctx,
service,
itemData.UUID(),
itemData,
drivePath.DriveID,
restoreFolderID,
copyBuffer,
source)
if err != nil {
return itemInfo, clues.Wrap(err, "restoring file")
}
return itemInfo, nil
}
type fileFetcher interface {
Fetch(ctx context.Context, name string) (data.Stream, error)
}
@@ -391,8 +446,7 @@ func restoreV1File(
drivePath.DriveID,
itemID,
meta,
permissionIDMappings,
)
permissionIDMappings)
if err != nil {
return details.ItemInfo{}, clues.Wrap(err, "restoring item permissions")
}
@@ -400,7 +454,7 @@
return itemInfo, nil
}
func restoreV2File(
func restoreV6File(
ctx context.Context,
source driveSource,
service graph.Servicer,

View File

@@ -334,7 +334,7 @@ func getItemStream(
bcounter ByteCounter,
) (data.Stream, error) {
if itemPath == nil {
return nil, clues.Stack(errNoRestorePath).WithClues(ctx)
return nil, clues.Wrap(errNoRestorePath, "getting item stream").WithClues(ctx)
}
// GetNestedEntry handles nil properly.

View File

@@ -197,7 +197,8 @@ func (op *RestoreOperation) do(
ctx,
"resource_owner", bup.Selector.DiscreteOwner,
"details_paths", len(paths),
"backup_snapshot_id", bup.SnapshotID)
"backup_snapshot_id", bup.SnapshotID,
"backup_version", bup.Version)
op.bus.Event(
ctx,
@@ -218,7 +219,7 @@ func (op *RestoreOperation) do(
dcs, err := op.kopia.RestoreMultipleItems(ctx, bup.SnapshotID, paths, opStats.bytesRead, op.Errors)
if err != nil {
return nil, errors.Wrap(err, "retrieving collections from repository")
return nil, errors.Wrap(err, "producing collections to restore")
}
kopiaComplete <- struct{}{}

View File

@@ -14,7 +14,6 @@ import (
"github.com/alcionai/corso/src/internal/connector/exchange"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/mockconnector"
"github.com/alcionai/corso/src/internal/connector/onedrive"
"github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
@@ -137,8 +136,10 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
// ---------------------------------------------------------------------------
type bupResults struct {
backupID model.StableID
items int
selectorResourceOwners []string
resourceOwner string
backupID model.StableID
items int
}
type RestoreOpIntegrationSuite struct {
@@ -165,12 +166,13 @@ func (suite *RestoreOpIntegrationSuite) SetupSuite() {
ctx, flush := tester.NewContext()
defer flush()
t := suite.T()
m365UserID := tester.M365UserID(t)
acct := tester.NewM365Account(t)
// need to initialize the repository before we can test connecting to it.
st := tester.NewPrefixedS3Storage(t)
k := kopia.NewConn(st)
var (
t = suite.T()
m365UserID = tester.M365UserID(t)
acct = tester.NewM365Account(t)
st = tester.NewPrefixedS3Storage(t)
k = kopia.NewConn(st)
)
err := k.Initialize(ctx)
require.NoError(t, err, clues.ToCore(err))
@@ -192,65 +194,104 @@ func (suite *RestoreOpIntegrationSuite) SetupSuite() {
sw := store.NewKopiaStore(ms)
suite.sw = sw
users := []string{m365UserID}
suite.Run("exchange_setup", func() {
var (
t = suite.T()
users = []string{m365UserID}
bsel = selectors.NewExchangeBackup(users)
)
bsel := selectors.NewExchangeBackup(users)
bsel.DiscreteOwner = m365UserID
bsel.Include(
bsel.MailFolders([]string{exchange.DefaultMailFolder}, selectors.PrefixMatch()),
bsel.ContactFolders([]string{exchange.DefaultContactFolder}, selectors.PrefixMatch()),
bsel.EventCalendars([]string{exchange.DefaultCalendar}, selectors.PrefixMatch()),
)
bsel.DiscreteOwner = m365UserID
bsel.Include(
bsel.MailFolders([]string{exchange.DefaultMailFolder}, selectors.PrefixMatch()),
bsel.ContactFolders([]string{exchange.DefaultContactFolder}, selectors.PrefixMatch()),
bsel.EventCalendars([]string{exchange.DefaultCalendar}, selectors.PrefixMatch()),
)
bo, err := NewBackupOperation(
ctx,
control.Options{},
kw,
sw,
acct,
bsel.Selector,
bsel.Selector.DiscreteOwner,
evmock.NewBus())
require.NoError(t, err, clues.ToCore(err))
bo, err := NewBackupOperation(
ctx,
control.Options{},
kw,
sw,
acct,
bsel.Selector,
bsel.Selector.DiscreteOwner,
evmock.NewBus())
require.NoError(t, err, clues.ToCore(err))
err = bo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, bo.Results.BackupID)
err = bo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, bo.Results.BackupID)
suite.exchange = bupResults{
backupID: bo.Results.BackupID,
// Discount metadata files (3 paths, 3 deltas) as
// they are not part of the data restored.
items: bo.Results.ItemsWritten - 6,
}
suite.exchange = bupResults{
selectorResourceOwners: users,
resourceOwner: m365UserID,
backupID: bo.Results.BackupID,
// Discount metadata collection files (1 delta and one prev path for each category).
// These meta files are used to aid restore, but are not themselves
// restored (ie: counted as writes).
items: bo.Results.ItemsWritten - 6,
}
})
siteID := tester.M365SiteID(t)
sites := []string{siteID}
csel := selectors.NewSharePointBackup(sites)
csel.DiscreteOwner = siteID
csel.Include(csel.LibraryFolders(selectors.Any()))
suite.Run("sharepoint_setup", func() {
var (
t = suite.T()
siteID = tester.M365SiteID(t)
sites = []string{siteID}
spsel = selectors.NewSharePointBackup(sites)
)
bo, err = NewBackupOperation(
ctx,
control.Options{},
kw,
sw,
acct,
csel.Selector,
csel.Selector.DiscreteOwner,
evmock.NewBus())
require.NoError(t, err, clues.ToCore(err))
spsel.DiscreteOwner = siteID
// assume a folder name "test" exists in the drive.
// this is brittle, and requires us to backfill anytime
// the site under test changes, but also prevents explosive
// growth from re-backup/restore of restored files.
spsel.Include(spsel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
err = bo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, bo.Results.BackupID)
bo, err := NewBackupOperation(
ctx,
control.Options{},
kw,
sw,
acct,
spsel.Selector,
spsel.Selector.DiscreteOwner,
evmock.NewBus())
require.NoError(t, err, clues.ToCore(err))
suite.sharepoint = bupResults{
backupID: bo.Results.BackupID,
// Discount metadata files (2 paths, 2 deltas) as
// they are not part of the data restored.
items: bo.Results.ItemsWritten - 4,
}
// get the count of drives
m365, err := acct.M365Config()
require.NoError(t, err, clues.ToCore(err))
adpt, err := graph.CreateAdapter(
m365.AzureTenantID,
m365.AzureClientID,
m365.AzureClientSecret)
require.NoError(t, err, clues.ToCore(err))
service := graph.NewService(adpt)
spPgr := api.NewSiteDrivePager(service, siteID, []string{"id", "name"})
drives, err := api.GetAllDrives(ctx, spPgr, true, 3)
require.NoError(t, err, clues.ToCore(err))
err = bo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, bo.Results.BackupID)
suite.sharepoint = bupResults{
selectorResourceOwners: sites,
resourceOwner: siteID,
backupID: bo.Results.BackupID,
// Discount metadata files (1 delta, 1 prev path)
// assume only one folder, and therefore 1 dirmeta per drive
// assume only one file in each folder, and therefore 1 meta per drive.
// These meta files are used to aid restore, but are not themselves
// restored (ie: counted as writes).
items: bo.Results.ItemsWritten - 2 - len(drives) - len(drives),
}
})
}
func (suite *RestoreOpIntegrationSuite) TearDownSuite() {
@@ -328,8 +369,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
expectedItems: suite.exchange.items,
dest: tester.DefaultTestRestoreDestination(),
getSelector: func(t *testing.T) selectors.Selector {
users := []string{tester.M365UserID(t)}
rsel := selectors.NewExchangeRestore(users)
rsel := selectors.NewExchangeRestore(suite.exchange.selectorResourceOwners)
rsel.Include(rsel.AllData())
return rsel.Selector
@@ -341,46 +381,19 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
expectedItems: suite.sharepoint.items,
dest: control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive),
getSelector: func(t *testing.T) selectors.Selector {
bsel := selectors.NewSharePointRestore([]string{tester.M365SiteID(t)})
bsel.Include(bsel.AllData())
rsel := selectors.NewSharePointRestore(suite.sharepoint.selectorResourceOwners)
rsel.Include(rsel.AllData())
return bsel.Selector
},
cleanup: func(t *testing.T, dest string) {
ctx, flush := tester.NewContext()
defer flush()
act := tester.NewM365Account(t)
m365, err := act.M365Config()
require.NoError(t, err, clues.ToCore(err))
adpt, err := graph.CreateAdapter(
m365.AzureTenantID,
m365.AzureClientID,
m365.AzureClientSecret)
require.NoError(t, err, clues.ToCore(err))
service := graph.NewService(adpt)
pager := api.NewSiteDrivePager(service, tester.M365SiteID(t), []string{"id", "name"})
driveID, err := pager.GetDriveIDByName(ctx, "Documents")
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, driveID)
folderID, err := pager.GetFolderIDByName(ctx, driveID, dest)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, folderID)
err = onedrive.DeleteItem(ctx, service, driveID, folderID)
assert.NoError(t, err, "deleting restore folder", clues.ToCore(err))
return rsel.Selector
},
},
}
for _, test := range tables {
suite.T().Run(test.name, func(t *testing.T) {
suite.Run(test.name, func() {
t := suite.T()
mb := evmock.NewBus()
ro, err := NewRestoreOperation(
ctx,
control.Options{FailFast: true},
@@ -408,11 +421,6 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
assert.Equal(t, test.expectedItems, ro.Results.ItemsWritten, "backup and restore wrote the same num of items")
assert.Equal(t, 1, mb.TimesCalled[events.RestoreStart], "restore-start events")
assert.Equal(t, 1, mb.TimesCalled[events.RestoreEnd], "restore-end events")
// clean up
if test.cleanup != nil {
test.cleanup(t, test.dest.ContainerName)
}
})
}
}

View File

@@ -112,36 +112,31 @@ func readTestConfig() (map[string]string, error) {
TestCfgUserID,
os.Getenv(EnvCorsoM365TestUserID),
vpr.GetString(TestCfgUserID),
"LynneR@10rqc2.onmicrosoft.com",
)
"LynneR@10rqc2.onmicrosoft.com")
fallbackTo(
testEnv,
TestCfgSecondaryUserID,
os.Getenv(EnvCorsoSecondaryM365TestUserID),
vpr.GetString(TestCfgSecondaryUserID),
"AdeleV@10rqc2.onmicrosoft.com",
)
"AdeleV@10rqc2.onmicrosoft.com")
fallbackTo(
testEnv,
TestCfgLoadTestUserID,
os.Getenv(EnvCorsoM365LoadTestUserID),
vpr.GetString(TestCfgLoadTestUserID),
"leeg@10rqc2.onmicrosoft.com",
)
"leeg@10rqc2.onmicrosoft.com")
fallbackTo(
testEnv,
TestCfgLoadTestOrgUsers,
os.Getenv(EnvCorsoM365LoadTestOrgUsers),
vpr.GetString(TestCfgLoadTestOrgUsers),
"AdeleV@10rqc2.onmicrosoft.com,LynneR@10rqc2.onmicrosoft.com",
)
"AdeleV@10rqc2.onmicrosoft.com,LynneR@10rqc2.onmicrosoft.com")
fallbackTo(
testEnv,
TestCfgSiteID,
os.Getenv(EnvCorsoM365TestSiteID),
vpr.GetString(TestCfgSiteID),
"10rqc2.sharepoint.com,bb5d5b4a-e089-4e66-9868-9e263ecc635d,4fa3a2c0-fa81-4e6f-8e8b-1479a8927bc6",
)
"10rqc2.sharepoint.com,4892edf5-2ebf-46be-a6e5-a40b2cbf1c1a,38ab6d06-fc82-4417-af93-22d8733c22be")
testEnv[EnvCorsoTestConfigFilePath] = os.Getenv(EnvCorsoTestConfigFilePath)
testConfig = testEnv