add api funcs for creating documentLibs (#3793)

Adds API handlers for creating document libraries in SharePoint. This is the first step toward allowing us to restore drives that were deleted between backup and restore.

---

#### Does this PR need a docs update or release note?

- [x] 🕐 Yes, but in a later PR

#### Type of change

- [x] 🌻 Feature

#### Issue(s)

* #3562

#### Test Plan

- [x]  Unit test
- [x] 💚 E2E
This commit is contained in:
Keepers 2023-07-20 15:05:30 -06:00 committed by GitHub
parent e130f85da3
commit 9359679f99
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 1051 additions and 63 deletions

View File

@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased] (beta) ## [Unreleased] (beta)
### Fixed
- SharePoint document libraries deleted after the last backup can now be restored.
## [v0.11.1] (beta) - 2023-07-20 ## [v0.11.1] (beta) - 2023-07-20
### Fixed ### Fixed
@ -23,6 +26,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed ### Fixed
- Return a ServiceNotEnabled error when a tenant has no active SharePoint license. - Return a ServiceNotEnabled error when a tenant has no active SharePoint license.
- Added retries for http/2 stream connection failures when downloading large item content. - Added retries for http/2 stream connection failures when downloading large item content.
- SharePoint document libraries that were deleted after the last backup can now be restored.
### Known issues ### Known issues
- If a link share is created for an item with inheritance disabled - If a link share is created for an item with inheritance disabled

View File

@ -40,6 +40,11 @@ type Cacher interface {
ProviderForName(id string) Provider ProviderForName(id string) Provider
} }
// CacheBuilder composes Cacher with Add, allowing callers to populate
// the id<->name mappings incrementally after construction.
type CacheBuilder interface {
	Add(id, name string)
	Cacher
}
var _ Cacher = &cache{} var _ Cacher = &cache{}
type cache struct { type cache struct {
@ -47,17 +52,29 @@ type cache struct {
nameToID map[string]string nameToID map[string]string
} }
func NewCache(idToName map[string]string) cache { func NewCache(idToName map[string]string) *cache {
nti := make(map[string]string, len(idToName)) c := cache{
idToName: map[string]string{},
for id, name := range idToName { nameToID: map[string]string{},
nti[name] = id
} }
return cache{ if len(idToName) > 0 {
idToName: idToName, nti := make(map[string]string, len(idToName))
nameToID: nti,
for id, name := range idToName {
nti[name] = id
}
c.idToName = idToName
c.nameToID = nti
} }
return &c
}
func (c *cache) Add(id, name string) {
c.idToName[strings.ToLower(id)] = name
c.nameToID[strings.ToLower(name)] = id
} }
// IDOf returns the id associated with the given name. // IDOf returns the id associated with the given name.

View File

@ -1,12 +1,24 @@
package mock package mock
import ( import (
"context"
"io" "io"
"time" "time"
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
) )
// ---------------------------------------------------------------------------
// stream
// ---------------------------------------------------------------------------
var _ data.Stream = &Stream{}
type Stream struct { type Stream struct {
ID string ID string
Reader io.ReadCloser Reader io.ReadCloser
@ -52,3 +64,39 @@ type errReader struct {
func (er errReader) Read([]byte) (int, error) { func (er errReader) Read([]byte) (int, error) {
return 0, er.readErr return 0, er.readErr
} }
// ---------------------------------------------------------------------------
// collection
// ---------------------------------------------------------------------------
var (
	_ data.Collection        = &Collection{}
	_ data.BackupCollection  = &Collection{}
	_ data.RestoreCollection = &Collection{}
)

// Collection is a stub that satisfies data.Collection, BackupCollection,
// and RestoreCollection for tests that need a collection value but never
// consume its contents.
type Collection struct{}

// Items returns a nil channel; test callers are not expected to range
// over it.
func (c Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
	return nil
}

// FullPath returns nil; the stub carries no path information.
func (c Collection) FullPath() path.Path {
	return nil
}

// PreviousPath returns nil; the stub carries no path information.
func (c Collection) PreviousPath() path.Path {
	return nil
}

// State always reports the collection as newly created.
func (c Collection) State() data.CollectionState {
	return data.NewState
}

// DoNotMergeItems always returns true for the stub.
func (c Collection) DoNotMergeItems() bool {
	return true
}

// FetchItemByName always fails; the stub holds no items to fetch.
func (c Collection) FetchItemByName(ctx context.Context, name string) (data.Stream, error) {
	return &Stream{}, clues.New("not implemented")
}

View File

@ -14,6 +14,7 @@ import (
"github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/internal/operations/inject"
"github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api"
@ -47,6 +48,11 @@ type Controller struct {
// mutex used to synchronize updates to `status` // mutex used to synchronize updates to `status`
mu sync.Mutex mu sync.Mutex
status support.ControllerOperationStatus // contains the status of the last run status status support.ControllerOperationStatus // contains the status of the last run status
// backupDriveIDNames is populated on restore. It maps the backup's
// drive names to their id. Primarily for use when creating or looking
// up a new drive.
backupDriveIDNames idname.CacheBuilder
} }
func NewController( func NewController(
@ -142,6 +148,20 @@ func (ctrl *Controller) incrementAwaitingMessages() {
ctrl.wg.Add(1) ctrl.wg.Add(1)
} }
// CacheItemInfo records the drive ID and name from restored item details
// into backupDriveIDNames, so later restore steps can map between the
// backup's drive IDs and names (eg: when re-creating a deleted drive).
// The cache is lazily initialized on first use.
func (ctrl *Controller) CacheItemInfo(dii details.ItemInfo) {
	if ctrl.backupDriveIDNames == nil {
		ctrl.backupDriveIDNames = idname.NewCache(map[string]string{})
	}

	// only sharepoint and onedrive details carry drive identity;
	// all other services are ignored.
	if dii.SharePoint != nil {
		ctrl.backupDriveIDNames.Add(dii.SharePoint.DriveID, dii.SharePoint.DriveName)
	}

	if dii.OneDrive != nil {
		ctrl.backupDriveIDNames.Add(dii.OneDrive.DriveID, dii.OneDrive.DriveName)
	}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Resource Lookup Handling // Resource Lookup Handling
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------

View File

@ -12,8 +12,10 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/idname"
inMock "github.com/alcionai/corso/src/internal/common/idname/mock" inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock" exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
"github.com/alcionai/corso/src/internal/m365/mock" "github.com/alcionai/corso/src/internal/m365/mock"
"github.com/alcionai/corso/src/internal/m365/resource" "github.com/alcionai/corso/src/internal/m365/resource"
@ -22,6 +24,7 @@ import (
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/count" "github.com/alcionai/corso/src/pkg/count"
@ -260,6 +263,82 @@ func (suite *ControllerUnitSuite) TestController_Wait() {
assert.Equal(t, int64(4), result.Bytes) assert.Equal(t, int64(4), result.Bytes)
} }
// TestController_CacheItemInfo verifies that drive id/name pairs from
// onedrive and sharepoint item details are added to backupDriveIDNames,
// and that details from other services are ignored (empty expectations).
func (suite *ControllerUnitSuite) TestController_CacheItemInfo() {
	var (
		odid   = "od-id"
		odname = "od-name"
		spid   = "sp-id"
		spname = "sp-name"

		// intentionally declared outside the test loop
		ctrl = &Controller{
			wg:                 &sync.WaitGroup{},
			region:             &trace.Region{},
			backupDriveIDNames: idname.NewCache(nil),
		}
	)

	table := []struct {
		name       string
		service    path.ServiceType
		cat        path.CategoryType
		dii        details.ItemInfo
		expectID   string
		expectName string
	}{
		{
			name: "exchange",
			dii: details.ItemInfo{
				Exchange: &details.ExchangeInfo{},
			},
			expectID:   "",
			expectName: "",
		},
		{
			name: "folder",
			dii: details.ItemInfo{
				Folder: &details.FolderInfo{},
			},
			expectID:   "",
			expectName: "",
		},
		{
			name: "onedrive",
			dii: details.ItemInfo{
				OneDrive: &details.OneDriveInfo{
					DriveID:   odid,
					DriveName: odname,
				},
			},
			expectID:   odid,
			expectName: odname,
		},
		{
			name: "sharepoint",
			dii: details.ItemInfo{
				SharePoint: &details.SharePointInfo{
					DriveID:   spid,
					DriveName: spname,
				},
			},
			expectID:   spid,
			expectName: spname,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctrl.CacheItemInfo(test.dii)

			// lookups in both directions should agree with expectations;
			// empty expectations assert the cache was not populated.
			name, _ := ctrl.backupDriveIDNames.NameOf(test.expectID)
			assert.Equal(t, test.expectName, name)

			id, _ := ctrl.backupDriveIDNames.IDOf(test.expectName)
			assert.Equal(t, test.expectID, id)
		})
	}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Integration tests // Integration tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -315,7 +394,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreFailsBadService() {
RestorePermissions: true, RestorePermissions: true,
ToggleFeatures: control.Toggles{}, ToggleFeatures: control.Toggles{},
}, },
nil, []data.RestoreCollection{&dataMock.Collection{}},
fault.New(true), fault.New(true),
count.New()) count.New())
assert.Error(t, err, clues.ToCore(err)) assert.Error(t, err, clues.ToCore(err))
@ -397,13 +476,8 @@ func (suite *ControllerIntegrationSuite) TestEmptyCollections() {
test.col, test.col,
fault.New(true), fault.New(true),
count.New()) count.New())
require.NoError(t, err, clues.ToCore(err)) require.Error(t, err, clues.ToCore(err))
assert.NotNil(t, deets) assert.Nil(t, deets)
stats := suite.ctrl.Wait()
assert.Zero(t, stats.Objects)
assert.Zero(t, stats.Folders)
assert.Zero(t, stats.Successes)
}) })
} }
} }

View File

@ -69,3 +69,5 @@ func (ctrl Controller) ConsumeRestoreCollections(
) (*details.Details, error) { ) (*details.Details, error) {
return ctrl.Deets, ctrl.Err return ctrl.Deets, ctrl.Err
} }
// CacheItemInfo is a no-op on the mock controller.
func (ctrl Controller) CacheItemInfo(dii details.ItemInfo) {}

View File

@ -35,6 +35,7 @@ type BackupHandler interface {
api.Getter api.Getter
GetItemPermissioner GetItemPermissioner
GetItemer GetItemer
NewDrivePagerer
// PathPrefix constructs the service and category specific path prefix for // PathPrefix constructs the service and category specific path prefix for
// the given values. // the given values.
@ -49,7 +50,6 @@ type BackupHandler interface {
// ServiceCat returns the service and category used by this implementation. // ServiceCat returns the service and category used by this implementation.
ServiceCat() (path.ServiceType, path.CategoryType) ServiceCat() (path.ServiceType, path.CategoryType)
NewDrivePager(resourceOwner string, fields []string) api.DrivePager
NewItemPager(driveID, link string, fields []string) api.DriveItemDeltaEnumerator NewItemPager(driveID, link string, fields []string) api.DriveItemDeltaEnumerator
// FormatDisplayPath creates a human-readable string to represent the // FormatDisplayPath creates a human-readable string to represent the
// provided path. // provided path.
@ -61,6 +61,10 @@ type BackupHandler interface {
IncludesDir(dir string) bool IncludesDir(dir string) bool
} }
// NewDrivePagerer produces a pager for enumerating the drives available
// to the given resource owner.
type NewDrivePagerer interface {
	NewDrivePager(resourceOwner string, fields []string) api.DrivePager
}
type GetItemPermissioner interface { type GetItemPermissioner interface {
GetItemPermission( GetItemPermission(
ctx context.Context, ctx context.Context,
@ -86,7 +90,9 @@ type RestoreHandler interface {
GetItemsByCollisionKeyser GetItemsByCollisionKeyser
GetRootFolderer GetRootFolderer
ItemInfoAugmenter ItemInfoAugmenter
NewDrivePagerer
NewItemContentUploader NewItemContentUploader
PostDriver
PostItemInContainerer PostItemInContainerer
DeleteItemPermissioner DeleteItemPermissioner
UpdateItemPermissioner UpdateItemPermissioner
@ -145,6 +151,13 @@ type UpdateItemLinkSharer interface {
) (models.Permissionable, error) ) (models.Permissionable, error)
} }
// PostDriver creates a new drive (eg: a sharepoint document library)
// with the given name, owned by the protected resource.
type PostDriver interface {
	PostDrive(
		ctx context.Context,
		protectedResourceID, driveName string,
	) (models.Driveable, error)
}
type PostItemInContainerer interface { type PostItemInContainerer interface {
PostItemInContainer( PostItemInContainer(
ctx context.Context, ctx context.Context,

View File

@ -361,8 +361,8 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
Folders: folderElements, Folders: folderElements,
} }
caches := NewRestoreCaches() caches := NewRestoreCaches(nil)
caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId()) caches.DriveIDToDriveInfo[driveID] = driveInfo{rootFolderID: ptr.Val(rootFolder.GetId())}
rh := NewRestoreHandler(suite.ac) rh := NewRestoreHandler(suite.ac)

View File

@ -5,6 +5,7 @@ import (
"net/http" "net/http"
"strings" "strings"
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
@ -133,6 +134,19 @@ func NewRestoreHandler(ac api.Client) *itemRestoreHandler {
return &itemRestoreHandler{ac.Drives()} return &itemRestoreHandler{ac.Drives()}
} }
// PostDrive always returns an error: drive creation is not supported
// for oneDrive restores.
func (h itemRestoreHandler) PostDrive(
	context.Context,
	string, string,
) (models.Driveable, error) {
	return nil, clues.New("creating drives in oneDrive is not supported")
}

// NewDrivePager produces a pager over the drives available to the
// given user.
func (h itemRestoreHandler) NewDrivePager(
	resourceOwner string, fields []string,
) api.DrivePager {
	return h.ac.NewUserDrivePager(resourceOwner, fields)
}
// AugmentItemInfo will populate a details.OneDriveInfo struct // AugmentItemInfo will populate a details.OneDriveInfo struct
// with properties from the drive item. ItemSize is specified // with properties from the drive item. ItemSize is specified
// separately for restore processes because the local itemable // separately for restore processes because the local itemable

View File

@ -249,9 +249,25 @@ type RestoreHandler struct {
PostItemResp models.DriveItemable PostItemResp models.DriveItemable
PostItemErr error PostItemErr error
DrivePagerV api.DrivePager
PostDriveResp models.Driveable
PostDriveErr error
UploadSessionErr error UploadSessionErr error
} }
// PostDrive returns the canned response and error configured on the mock.
func (h RestoreHandler) PostDrive(
	ctx context.Context,
	protectedResourceID, driveName string,
) (models.Driveable, error) {
	return h.PostDriveResp, h.PostDriveErr
}

// NewDrivePager returns the pager configured on the mock.
func (h RestoreHandler) NewDrivePager(string, []string) api.DrivePager {
	return h.DrivePagerV
}
func (h *RestoreHandler) AugmentItemInfo( func (h *RestoreHandler) AugmentItemInfo(
details.ItemInfo, details.ItemInfo,
models.DriveItemable, models.DriveItemable,

View File

@ -15,6 +15,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/diagnostics"
@ -37,9 +38,17 @@ const (
maxUploadRetries = 3 maxUploadRetries = 3
) )
// driveInfo aggregates the identifiers needed to restore into a drive:
// the drive's id, its display name, and the id of its root folder.
type driveInfo struct {
	id           string
	name         string
	rootFolderID string
}
type restoreCaches struct { type restoreCaches struct {
BackupDriveIDName idname.Cacher
collisionKeyToItemID map[string]api.DriveItemIDType collisionKeyToItemID map[string]api.DriveItemIDType
DriveIDToRootFolderID map[string]string DriveIDToDriveInfo map[string]driveInfo
DriveNameToDriveInfo map[string]driveInfo
Folders *folderCache Folders *folderCache
OldLinkShareIDToNewID map[string]string OldLinkShareIDToNewID map[string]string
OldPermIDToNewID map[string]string OldPermIDToNewID map[string]string
@ -48,10 +57,74 @@ type restoreCaches struct {
pool sync.Pool pool sync.Pool
} }
func NewRestoreCaches() *restoreCaches { func (rc *restoreCaches) AddDrive(
ctx context.Context,
md models.Driveable,
grf GetRootFolderer,
) error {
di := driveInfo{
id: ptr.Val(md.GetId()),
name: ptr.Val(md.GetName()),
}
ctx = clues.Add(ctx, "drive_info", di)
root, err := grf.GetRootFolder(ctx, di.id)
if err != nil {
return clues.Wrap(err, "getting drive root id")
}
di.rootFolderID = ptr.Val(root.GetId())
rc.DriveIDToDriveInfo[di.id] = di
rc.DriveNameToDriveInfo[di.name] = di
return nil
}
// Populate looks up all drives available to the protectedResource
// and adds their info to the caches. Each drive's root folder id is
// resolved as part of AddDrive.
func (rc *restoreCaches) Populate(
	ctx context.Context,
	gdparf GetDrivePagerAndRootFolderer,
	protectedResourceID string,
) error {
	drives, err := api.GetAllDrives(
		ctx,
		gdparf.NewDrivePager(protectedResourceID, nil),
		true,
		maxDrivesRetries)
	if err != nil {
		return clues.Wrap(err, "getting drives")
	}

	for _, md := range drives {
		if err := rc.AddDrive(ctx, md, gdparf); err != nil {
			return clues.Wrap(err, "caching drive")
		}
	}

	return nil
}

// GetDrivePagerAndRootFolderer is the minimal handler interface
// required by restoreCaches.Populate.
type GetDrivePagerAndRootFolderer interface {
	GetRootFolderer
	NewDrivePagerer
}
func NewRestoreCaches(
backupDriveIDNames idname.Cacher,
) *restoreCaches {
// avoid nil panics
if backupDriveIDNames == nil {
backupDriveIDNames = idname.NewCache(nil)
}
return &restoreCaches{ return &restoreCaches{
BackupDriveIDName: backupDriveIDNames,
collisionKeyToItemID: map[string]api.DriveItemIDType{}, collisionKeyToItemID: map[string]api.DriveItemIDType{},
DriveIDToRootFolderID: map[string]string{}, DriveIDToDriveInfo: map[string]driveInfo{},
DriveNameToDriveInfo: map[string]driveInfo{},
Folders: NewFolderCache(), Folders: NewFolderCache(),
OldLinkShareIDToNewID: map[string]string{}, OldLinkShareIDToNewID: map[string]string{},
OldPermIDToNewID: map[string]string{}, OldPermIDToNewID: map[string]string{},
@ -73,19 +146,27 @@ func ConsumeRestoreCollections(
backupVersion int, backupVersion int,
restoreCfg control.RestoreConfig, restoreCfg control.RestoreConfig,
opts control.Options, opts control.Options,
backupDriveIDNames idname.Cacher,
dcs []data.RestoreCollection, dcs []data.RestoreCollection,
deets *details.Builder, deets *details.Builder,
errs *fault.Bus, errs *fault.Bus,
ctr *count.Bus, ctr *count.Bus,
) (*support.ControllerOperationStatus, error) { ) (*support.ControllerOperationStatus, error) {
var ( var (
restoreMetrics support.CollectionMetrics restoreMetrics support.CollectionMetrics
caches = NewRestoreCaches() el = errs.Local()
el = errs.Local() caches = NewRestoreCaches(backupDriveIDNames)
protectedResourceID = dcs[0].FullPath().ResourceOwner()
fallbackDriveName = restoreCfg.Location
) )
ctx = clues.Add(ctx, "backup_version", backupVersion) ctx = clues.Add(ctx, "backup_version", backupVersion)
err := caches.Populate(ctx, rh, protectedResourceID)
if err != nil {
return nil, clues.Wrap(err, "initializing restore caches")
}
// Reorder collections so that the parents directories are created // Reorder collections so that the parents directories are created
// before the child directories; a requirement for permissions. // before the child directories; a requirement for permissions.
data.SortRestoreCollections(dcs) data.SortRestoreCollections(dcs)
@ -102,7 +183,7 @@ func ConsumeRestoreCollections(
ictx = clues.Add( ictx = clues.Add(
ctx, ctx,
"category", dc.FullPath().Category(), "category", dc.FullPath().Category(),
"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()), "resource_owner", clues.Hide(protectedResourceID),
"full_path", dc.FullPath()) "full_path", dc.FullPath())
) )
@ -115,6 +196,7 @@ func ConsumeRestoreCollections(
caches, caches,
deets, deets,
opts.RestorePermissions, opts.RestorePermissions,
fallbackDriveName,
errs, errs,
ctr.Local()) ctr.Local())
if err != nil { if err != nil {
@ -152,18 +234,20 @@ func RestoreCollection(
caches *restoreCaches, caches *restoreCaches,
deets *details.Builder, deets *details.Builder,
restorePerms bool, // TODD: move into restoreConfig restorePerms bool, // TODD: move into restoreConfig
fallbackDriveName string,
errs *fault.Bus, errs *fault.Bus,
ctr *count.Bus, ctr *count.Bus,
) (support.CollectionMetrics, error) { ) (support.CollectionMetrics, error) {
var ( var (
metrics = support.CollectionMetrics{} metrics = support.CollectionMetrics{}
directory = dc.FullPath() directory = dc.FullPath()
el = errs.Local() protectedResourceID = directory.ResourceOwner()
metricsObjects int64 el = errs.Local()
metricsBytes int64 metricsObjects int64
metricsSuccess int64 metricsBytes int64
wg sync.WaitGroup metricsSuccess int64
complete bool wg sync.WaitGroup
complete bool
) )
ctx, end := diagnostics.Span(ctx, "gc:drive:restoreCollection", diagnostics.Label("path", directory)) ctx, end := diagnostics.Span(ctx, "gc:drive:restoreCollection", diagnostics.Label("path", directory))
@ -174,15 +258,23 @@ func RestoreCollection(
return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx) return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx)
} }
if _, ok := caches.DriveIDToRootFolderID[drivePath.DriveID]; !ok { di, err := ensureDriveExists(
root, err := rh.GetRootFolder(ctx, drivePath.DriveID) ctx,
if err != nil { rh,
return metrics, clues.Wrap(err, "getting drive root id") caches,
} drivePath,
protectedResourceID,
caches.DriveIDToRootFolderID[drivePath.DriveID] = ptr.Val(root.GetId()) fallbackDriveName)
if err != nil {
return metrics, clues.Wrap(err, "ensuring drive exists")
} }
// clobber the drivePath details with the details retrieved
// in the ensure func, as they might have changed to reflect
// a different drive as a restore location.
drivePath.DriveID = di.id
drivePath.Root = di.rootFolderID
// Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy // Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy
// from the backup under this the restore folder instead of root) // from the backup under this the restore folder instead of root)
// i.e. Restore into `<restoreContainerName>/<original folder path>` // i.e. Restore into `<restoreContainerName>/<original folder path>`
@ -704,7 +796,7 @@ func createRestoreFolders(
driveID = drivePath.DriveID driveID = drivePath.DriveID
folders = restoreDir.Elements() folders = restoreDir.Elements()
location = path.Builder{}.Append(driveID) location = path.Builder{}.Append(driveID)
parentFolderID = caches.DriveIDToRootFolderID[drivePath.DriveID] parentFolderID = caches.DriveIDToDriveInfo[drivePath.DriveID].rootFolderID
) )
ctx = clues.Add( ctx = clues.Add(
@ -1113,3 +1205,79 @@ func AugmentRestorePaths(
return paths, nil return paths, nil
} }
// PostDriveAndGetRootFolderer is the minimal handler interface
// required by ensureDriveExists.
type PostDriveAndGetRootFolderer interface {
	PostDriver
	GetRootFolderer
}

// ensureDriveExists looks up the drive by its id. If no drive is found with
// that ID, a new drive is generated with the same name. If the name collides
// with an existing drive, a number is appended to the drive name. Eg: foo ->
// foo 1. This will repeat as many times as is needed.
// Returns the driveInfo (id, name, and root folder id) of the drive that
// should be used as the restore destination.
func ensureDriveExists(
	ctx context.Context,
	pdagrf PostDriveAndGetRootFolderer,
	caches *restoreCaches,
	drivePath *path.DrivePath,
	protectedResourceID, fallbackDriveName string,
) (driveInfo, error) {
	driveID := drivePath.DriveID

	// the drive might already be cached by ID. it's okay
	// if the name has changed. the ID is a better reference
	// anyway.
	if di, ok := caches.DriveIDToDriveInfo[driveID]; ok {
		return di, nil
	}

	var (
		newDriveName = fallbackDriveName
		newDrive     models.Driveable
		err          error
	)

	// if the drive wasn't found by ID, maybe we can find a
	// drive with the same name but different ID.
	// start by looking up the old drive's name
	oldName, ok := caches.BackupDriveIDName.NameOf(driveID)
	if ok {
		// check for drives that currently have the same name
		if di, ok := caches.DriveNameToDriveInfo[oldName]; ok {
			return di, nil
		}

		// if no current drives have the same name, we'll make
		// a new drive with that name.
		newDriveName = oldName
	}

	nextDriveName := newDriveName

	// For sharepoint, document libraries can collide by name with
	// item types beyond just drive. Lists, for example, cannot share
	// names with document libraries (they're the same type, actually).
	// In those cases we need to rename the drive until we can create
	// one without a collision.
	for i := 1; ; i++ {
		ictx := clues.Add(ctx, "new_drive_name", clues.Hide(nextDriveName))

		newDrive, err = pdagrf.PostDrive(ictx, protectedResourceID, nextDriveName)
		if err != nil && !errors.Is(err, graph.ErrItemAlreadyExistsConflict) {
			return driveInfo{}, clues.Wrap(err, "creating new drive")
		}

		if err == nil {
			break
		}

		// name collision: append a counter and retry with a new name.
		nextDriveName = fmt.Sprintf("%s %d", newDriveName, i)
	}

	// cache the new drive; AddDrive also resolves its root folder id.
	// note: err is guaranteed non-nil inside this branch, so the prior
	// Wrap(...).OrNil() call was a no-op and has been dropped.
	if err := caches.AddDrive(ctx, newDrive, pdagrf); err != nil {
		return driveInfo{}, clues.Wrap(err, "adding drive to cache")
	}

	return caches.DriveIDToDriveInfo[ptr.Val(newDrive.GetId())], nil
}

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts" odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
@ -21,6 +22,7 @@ import (
"github.com/alcionai/corso/src/pkg/count" "github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api"
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
) )
type RestoreUnitSuite struct { type RestoreUnitSuite struct {
@ -491,7 +493,7 @@ func (suite *RestoreUnitSuite) TestRestoreItem_collisionHandling() {
mndi.SetId(ptr.To(mndiID)) mndi.SetId(ptr.To(mndiID))
var ( var (
caches = NewRestoreCaches() caches = NewRestoreCaches(nil)
rh = &mock.RestoreHandler{ rh = &mock.RestoreHandler{
PostItemResp: models.NewDriveItem(), PostItemResp: models.NewDriveItem(),
DeleteItemErr: test.deleteErr, DeleteItemErr: test.deleteErr,
@ -617,3 +619,435 @@ func (suite *RestoreUnitSuite) TestCreateFolder() {
}) })
} }
} }
// mockGRF mocks GetRootFolderer, returning a fixed root folder
// and/or error.
type mockGRF struct {
	err        error
	rootFolder models.DriveItemable
}

// GetRootFolder returns the configured root folder and error,
// ignoring its arguments.
func (m *mockGRF) GetRootFolder(
	context.Context,
	string,
) (models.DriveItemable, error) {
	return m.rootFolder, m.err
}
// TestRestoreCaches_AddDrive verifies that AddDrive indexes the drive's
// info by both id and name, and that root-folder lookup failures are
// propagated as errors.
func (suite *RestoreUnitSuite) TestRestoreCaches_AddDrive() {
	rfID := "this-is-id"
	driveID := "another-id"
	name := "name"

	rf := models.NewDriveItem()
	rf.SetId(&rfID)

	md := models.NewDrive()
	md.SetId(&driveID)
	md.SetName(&name)

	table := []struct {
		name        string
		mock        *mockGRF
		expectErr   require.ErrorAssertionFunc
		expectID    string
		checkValues bool
	}{
		{
			name:        "good",
			mock:        &mockGRF{rootFolder: rf},
			expectErr:   require.NoError,
			expectID:    rfID,
			checkValues: true,
		},
		{
			name:      "err",
			mock:      &mockGRF{err: assert.AnError},
			expectErr: require.Error,
			expectID:  "",
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			rc := NewRestoreCaches(nil)

			err := rc.AddDrive(ctx, md, test.mock)
			test.expectErr(t, err, clues.ToCore(err))

			if test.checkValues {
				// the same driveInfo must be reachable by id and by name.
				idResult := rc.DriveIDToDriveInfo[driveID]
				assert.Equal(t, driveID, idResult.id, "drive id")
				assert.Equal(t, name, idResult.name, "drive name")
				assert.Equal(t, test.expectID, idResult.rootFolderID, "root folder id")

				nameResult := rc.DriveNameToDriveInfo[name]
				assert.Equal(t, driveID, nameResult.id, "drive id")
				assert.Equal(t, name, nameResult.name, "drive name")
				assert.Equal(t, test.expectID, nameResult.rootFolderID, "root folder id")
			}
		})
	}
}
// mockGDPARF mocks GetDrivePagerAndRootFolderer, returning a fixed
// root folder (or error) and a configurable drive pager.
type mockGDPARF struct {
	err        error
	rootFolder models.DriveItemable
	pager      *apiMock.DrivePager
}

// GetRootFolder returns the configured root folder and error,
// ignoring its arguments.
func (m *mockGDPARF) GetRootFolder(
	context.Context,
	string,
) (models.DriveItemable, error) {
	return m.rootFolder, m.err
}

// NewDrivePager returns the configured pager, ignoring its arguments.
func (m *mockGDPARF) NewDrivePager(
	string,
	[]string,
) api.DrivePager {
	return m.pager
}
// TestRestoreCaches_Populate verifies that Populate adds one cache entry
// (by id and by name) per drive returned from the pager, and that pager
// errors are propagated.
func (suite *RestoreUnitSuite) TestRestoreCaches_Populate() {
	rfID := "this-is-id"
	driveID := "another-id"
	name := "name"

	rf := models.NewDriveItem()
	rf.SetId(&rfID)

	md := models.NewDrive()
	md.SetId(&driveID)
	md.SetName(&name)

	table := []struct {
		name        string
		mock        *apiMock.DrivePager
		expectErr   require.ErrorAssertionFunc
		expectLen   int
		checkValues bool
	}{
		{
			name: "no results",
			mock: &apiMock.DrivePager{
				ToReturn: []apiMock.PagerResult{
					{Drives: []models.Driveable{}},
				},
			},
			expectErr: require.NoError,
			expectLen: 0,
		},
		{
			name: "one result",
			mock: &apiMock.DrivePager{
				ToReturn: []apiMock.PagerResult{
					{Drives: []models.Driveable{md}},
				},
			},
			expectErr: require.NoError,
			expectLen: 1,
			checkValues: true,
		},
		{
			name: "error",
			mock: &apiMock.DrivePager{
				ToReturn: []apiMock.PagerResult{
					{Err: assert.AnError},
				},
			},
			expectErr: require.Error,
			expectLen: 0,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			gdparf := &mockGDPARF{
				rootFolder: rf,
				pager:      test.mock,
			}

			rc := NewRestoreCaches(nil)

			err := rc.Populate(ctx, gdparf, "shmoo")
			test.expectErr(t, err, clues.ToCore(err))

			// both indexes should contain exactly one entry per drive.
			assert.Len(t, rc.DriveIDToDriveInfo, test.expectLen)
			assert.Len(t, rc.DriveNameToDriveInfo, test.expectLen)

			if test.checkValues {
				idResult := rc.DriveIDToDriveInfo[driveID]
				assert.Equal(t, driveID, idResult.id, "drive id")
				assert.Equal(t, name, idResult.name, "drive name")
				assert.Equal(t, rfID, idResult.rootFolderID, "root folder id")

				nameResult := rc.DriveNameToDriveInfo[name]
				assert.Equal(t, driveID, nameResult.id, "drive id")
				assert.Equal(t, name, nameResult.name, "drive name")
				assert.Equal(t, rfID, nameResult.rootFolderID, "root folder id")
			}
		})
	}
}
// mockPDAGRF mocks PostDriveAndGetRootFolderer. Each PostDrive call
// consumes the next entry of postResp/postErr, allowing a sequence of
// outcomes (eg: name collision, then success) to be scripted.
type mockPDAGRF struct {
	// i indexes the next postResp/postErr entry to return.
	i        int
	postResp []models.Driveable
	postErr  []error
	grf      mockGRF
}

// PostDrive returns the next scripted response/error pair. When a
// drive is returned, it is stamped with the requested name so callers
// see the name they asked for.
func (m *mockPDAGRF) PostDrive(
	ctx context.Context,
	protectedResourceID, driveName string,
) (models.Driveable, error) {
	defer func() { m.i++ }()

	md := m.postResp[m.i]
	if md != nil {
		md.SetName(&driveName)
	}

	return md, m.postErr[m.i]
}

// GetRootFolder delegates to the embedded mockGRF.
func (m *mockPDAGRF) GetRootFolder(
	ctx context.Context,
	driveID string,
) (models.DriveItemable, error) {
	return m.grf.rootFolder, m.grf.err
}
// TestEnsureDriveExists verifies ensureDriveExists against a matrix of
// cache states: drives already cached, drives renamed or re-ID'd between
// backup and restore, drives that must be created fresh, and fallbacks
// when the desired name collides with an existing list or drive.
func (suite *RestoreUnitSuite) TestEnsureDriveExists() {
	rfID := "this-is-id"
	driveID := "another-id"
	oldID := "old-id"
	name := "name"
	otherName := "other name"

	// canned root folder returned by the mocked GetRootFolder call.
	rf := models.NewDriveItem()
	rf.SetId(&rfID)

	grf := mockGRF{rootFolder: rf}

	// each test case needs its own drive model, because the mock
	// PostDrive mutates the name of the drive it returns.
	makeMD := func() models.Driveable {
		md := models.NewDrive()
		md.SetId(&driveID)
		md.SetName(&name)

		return md
	}

	dp := &path.DrivePath{
		DriveID: driveID,
		Root:    "root:",
		Folders: path.Elements{},
	}

	// oldDP references a backup-time drive id that no longer exists
	// at restore time.
	oldDP := &path.DrivePath{
		DriveID: oldID,
		Root:    "root:",
		Folders: path.Elements{},
	}

	// populatedCache seeds a restore cache with one drive entry, keyed
	// by the given id and the shared drive name.
	populatedCache := func(id string) *restoreCaches {
		rc := NewRestoreCaches(nil)
		di := driveInfo{
			id:   id,
			name: name,
		}

		rc.DriveIDToDriveInfo[id] = di
		rc.DriveNameToDriveInfo[name] = di

		return rc
	}

	// oldDriveIDNames maps the backup-time drive id to its name,
	// simulating the id/name metadata captured during backup.
	oldDriveIDNames := idname.NewCache(nil)
	oldDriveIDNames.Add(oldID, name)

	// idSwitchedCache simulates a drive that kept its name but changed
	// ids between backup and restore.
	idSwitchedCache := func() *restoreCaches {
		rc := NewRestoreCaches(oldDriveIDNames)
		di := driveInfo{
			id:   "diff",
			name: name,
		}

		rc.DriveIDToDriveInfo["diff"] = di
		rc.DriveNameToDriveInfo[name] = di

		return rc
	}

	table := []struct {
		name string
		dp   *path.DrivePath
		mock *mockPDAGRF
		rc   *restoreCaches
		expectErr require.ErrorAssertionFunc
		// fallbackName is used when the original drive name is unknown
		// or unavailable.
		fallbackName string
		expectName   string
		expectID     string
		// skipValueChecks bypasses result assertions for error cases
		// where no driveInfo is produced.
		skipValueChecks bool
	}{
		{
			name: "drive already in cache",
			dp:   dp,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{makeMD()},
				postErr:  []error{nil},
				grf:      grf,
			},
			rc:           populatedCache(driveID),
			expectErr:    require.NoError,
			fallbackName: name,
			expectName:   name,
			expectID:     driveID,
		},
		{
			name: "drive with same name but different id exists",
			dp:   oldDP,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{makeMD()},
				postErr:  []error{nil},
				grf:      grf,
			},
			rc:           idSwitchedCache(),
			expectErr:    require.NoError,
			fallbackName: otherName,
			expectName:   name,
			expectID:     "diff",
		},
		{
			name: "drive created with old name",
			dp:   oldDP,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{makeMD()},
				postErr:  []error{nil},
				grf:      grf,
			},
			rc:           NewRestoreCaches(oldDriveIDNames),
			expectErr:    require.NoError,
			fallbackName: otherName,
			expectName:   name,
			expectID:     driveID,
		},
		{
			name: "drive created with fallback name",
			dp:   dp,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{makeMD()},
				postErr:  []error{nil},
				grf:      grf,
			},
			rc:           NewRestoreCaches(nil),
			expectErr:    require.NoError,
			fallbackName: otherName,
			expectName:   otherName,
			expectID:     driveID,
		},
		{
			name: "error creating drive",
			dp:   dp,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{nil},
				postErr:  []error{assert.AnError},
				grf:      grf,
			},
			rc:              NewRestoreCaches(nil),
			expectErr:       require.Error,
			fallbackName:    name,
			expectName:      "",
			skipValueChecks: true,
			expectID:        driveID,
		},
		{
			name: "drive name already exists",
			dp:   dp,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{makeMD()},
				postErr:  []error{nil},
				grf:      grf,
			},
			rc:           populatedCache("beaux"),
			expectErr:    require.NoError,
			fallbackName: name,
			expectName:   name,
			expectID:     driveID,
		},
		{
			// first PostDrive hits a name conflict; the retry appends
			// a numeric suffix to the drive name.
			name: "list with name already exists",
			dp:   dp,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{nil, makeMD()},
				postErr:  []error{graph.ErrItemAlreadyExistsConflict, nil},
				grf:      grf,
			},
			rc:           NewRestoreCaches(nil),
			expectErr:    require.NoError,
			fallbackName: name,
			expectName:   name + " 1",
			expectID:     driveID,
		},
		{
			name: "list with old name already exists",
			dp:   oldDP,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{nil, makeMD()},
				postErr:  []error{graph.ErrItemAlreadyExistsConflict, nil},
				grf:      grf,
			},
			rc:           NewRestoreCaches(oldDriveIDNames),
			expectErr:    require.NoError,
			fallbackName: name,
			expectName:   name + " 1",
			expectID:     driveID,
		},
		{
			name: "drive and list with name already exist",
			dp:   dp,
			mock: &mockPDAGRF{
				postResp: []models.Driveable{nil, makeMD()},
				postErr:  []error{graph.ErrItemAlreadyExistsConflict, nil},
				grf:      grf,
			},
			rc:           populatedCache(driveID),
			expectErr:    require.NoError,
			fallbackName: name,
			expectName:   name,
			expectID:     driveID,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			rc := test.rc

			di, err := ensureDriveExists(
				ctx,
				test.mock,
				rc,
				test.dp,
				"prID",
				test.fallbackName)
			test.expectErr(t, err, clues.ToCore(err))

			if !test.skipValueChecks {
				assert.Equal(t, test.expectName, di.name, "ensured drive has expected name")
				assert.Equal(t, test.expectID, di.id, "ensured drive has expected id")

				nameResult := rc.DriveNameToDriveInfo[test.expectName]
				assert.Equal(t, test.expectName, nameResult.name, "found drive entry with expected name")
			}
		})
	}
}

View File

@ -38,6 +38,10 @@ func (ctrl *Controller) ConsumeRestoreCollections(
ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()}) ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
ctx = clues.Add(ctx, "restore_config", restoreCfg) // TODO(rkeepers): needs PII control ctx = clues.Add(ctx, "restore_config", restoreCfg) // TODO(rkeepers): needs PII control
if len(dcs) == 0 {
return nil, clues.New("no data collections to restore")
}
var ( var (
status *support.ControllerOperationStatus status *support.ControllerOperationStatus
deets = &details.Builder{} deets = &details.Builder{}
@ -54,6 +58,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
backupVersion, backupVersion,
restoreCfg, restoreCfg,
opts, opts,
ctrl.backupDriveIDNames,
dcs, dcs,
deets, deets,
errs, errs,
@ -65,6 +70,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
ctrl.AC, ctrl.AC,
restoreCfg, restoreCfg,
opts, opts,
ctrl.backupDriveIDNames,
dcs, dcs,
deets, deets,
errs, errs,

View File

@ -157,11 +157,25 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
var _ onedrive.RestoreHandler = &libraryRestoreHandler{} var _ onedrive.RestoreHandler = &libraryRestoreHandler{}
type libraryRestoreHandler struct { type libraryRestoreHandler struct {
ac api.Drives ac api.Client
}
func (h libraryRestoreHandler) PostDrive(
ctx context.Context,
siteID, driveName string,
) (models.Driveable, error) {
return h.ac.Lists().PostDrive(ctx, siteID, driveName)
} }
func NewRestoreHandler(ac api.Client) *libraryRestoreHandler { func NewRestoreHandler(ac api.Client) *libraryRestoreHandler {
return &libraryRestoreHandler{ac.Drives()} return &libraryRestoreHandler{ac}
}
func (h libraryRestoreHandler) NewDrivePager(
resourceOwner string,
fields []string,
) api.DrivePager {
return h.ac.Drives().NewSiteDrivePager(resourceOwner, fields)
} }
func (h libraryRestoreHandler) AugmentItemInfo( func (h libraryRestoreHandler) AugmentItemInfo(
@ -177,21 +191,21 @@ func (h libraryRestoreHandler) DeleteItem(
ctx context.Context, ctx context.Context,
driveID, itemID string, driveID, itemID string,
) error { ) error {
return h.ac.DeleteItem(ctx, driveID, itemID) return h.ac.Drives().DeleteItem(ctx, driveID, itemID)
} }
func (h libraryRestoreHandler) DeleteItemPermission( func (h libraryRestoreHandler) DeleteItemPermission(
ctx context.Context, ctx context.Context,
driveID, itemID, permissionID string, driveID, itemID, permissionID string,
) error { ) error {
return h.ac.DeleteItemPermission(ctx, driveID, itemID, permissionID) return h.ac.Drives().DeleteItemPermission(ctx, driveID, itemID, permissionID)
} }
func (h libraryRestoreHandler) GetItemsInContainerByCollisionKey( func (h libraryRestoreHandler) GetItemsInContainerByCollisionKey(
ctx context.Context, ctx context.Context,
driveID, containerID string, driveID, containerID string,
) (map[string]api.DriveItemIDType, error) { ) (map[string]api.DriveItemIDType, error) {
m, err := h.ac.GetItemsInContainerByCollisionKey(ctx, driveID, containerID) m, err := h.ac.Drives().GetItemsInContainerByCollisionKey(ctx, driveID, containerID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -203,7 +217,7 @@ func (h libraryRestoreHandler) NewItemContentUpload(
ctx context.Context, ctx context.Context,
driveID, itemID string, driveID, itemID string,
) (models.UploadSessionable, error) { ) (models.UploadSessionable, error) {
return h.ac.NewItemContentUpload(ctx, driveID, itemID) return h.ac.Drives().NewItemContentUpload(ctx, driveID, itemID)
} }
func (h libraryRestoreHandler) PostItemPermissionUpdate( func (h libraryRestoreHandler) PostItemPermissionUpdate(
@ -211,7 +225,7 @@ func (h libraryRestoreHandler) PostItemPermissionUpdate(
driveID, itemID string, driveID, itemID string,
body *drives.ItemItemsItemInvitePostRequestBody, body *drives.ItemItemsItemInvitePostRequestBody,
) (drives.ItemItemsItemInviteResponseable, error) { ) (drives.ItemItemsItemInviteResponseable, error) {
return h.ac.PostItemPermissionUpdate(ctx, driveID, itemID, body) return h.ac.Drives().PostItemPermissionUpdate(ctx, driveID, itemID, body)
} }
func (h libraryRestoreHandler) PostItemLinkShareUpdate( func (h libraryRestoreHandler) PostItemLinkShareUpdate(
@ -219,7 +233,7 @@ func (h libraryRestoreHandler) PostItemLinkShareUpdate(
driveID, itemID string, driveID, itemID string,
body *drives.ItemItemsItemCreateLinkPostRequestBody, body *drives.ItemItemsItemCreateLinkPostRequestBody,
) (models.Permissionable, error) { ) (models.Permissionable, error) {
return h.ac.PostItemLinkShareUpdate(ctx, driveID, itemID, body) return h.ac.Drives().PostItemLinkShareUpdate(ctx, driveID, itemID, body)
} }
func (h libraryRestoreHandler) PostItemInContainer( func (h libraryRestoreHandler) PostItemInContainer(
@ -228,21 +242,21 @@ func (h libraryRestoreHandler) PostItemInContainer(
newItem models.DriveItemable, newItem models.DriveItemable,
onCollision control.CollisionPolicy, onCollision control.CollisionPolicy,
) (models.DriveItemable, error) { ) (models.DriveItemable, error) {
return h.ac.PostItemInContainer(ctx, driveID, parentFolderID, newItem, onCollision) return h.ac.Drives().PostItemInContainer(ctx, driveID, parentFolderID, newItem, onCollision)
} }
func (h libraryRestoreHandler) GetFolderByName( func (h libraryRestoreHandler) GetFolderByName(
ctx context.Context, ctx context.Context,
driveID, parentFolderID, folderName string, driveID, parentFolderID, folderName string,
) (models.DriveItemable, error) { ) (models.DriveItemable, error) {
return h.ac.GetFolderByName(ctx, driveID, parentFolderID, folderName) return h.ac.Drives().GetFolderByName(ctx, driveID, parentFolderID, folderName)
} }
func (h libraryRestoreHandler) GetRootFolder( func (h libraryRestoreHandler) GetRootFolder(
ctx context.Context, ctx context.Context,
driveID string, driveID string,
) (models.DriveItemable, error) { ) (models.DriveItemable, error) {
return h.ac.GetRootFolder(ctx, driveID) return h.ac.Drives().GetRootFolder(ctx, driveID)
} }
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------

View File

@ -10,6 +10,8 @@ import (
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/diagnostics"
@ -33,17 +35,25 @@ func ConsumeRestoreCollections(
ac api.Client, ac api.Client,
restoreCfg control.RestoreConfig, restoreCfg control.RestoreConfig,
opts control.Options, opts control.Options,
backupDriveIDNames idname.Cacher,
dcs []data.RestoreCollection, dcs []data.RestoreCollection,
deets *details.Builder, deets *details.Builder,
errs *fault.Bus, errs *fault.Bus,
ctr *count.Bus, ctr *count.Bus,
) (*support.ControllerOperationStatus, error) { ) (*support.ControllerOperationStatus, error) {
var ( var (
restoreMetrics support.CollectionMetrics lrh = libraryRestoreHandler{ac}
caches = onedrive.NewRestoreCaches() protectedResourceID = dcs[0].FullPath().ResourceOwner()
el = errs.Local() restoreMetrics support.CollectionMetrics
caches = onedrive.NewRestoreCaches(backupDriveIDNames)
el = errs.Local()
) )
err := caches.Populate(ctx, lrh, protectedResourceID)
if err != nil {
return nil, clues.Wrap(err, "initializing restore caches")
}
// Reorder collections so that the parents directories are created // Reorder collections so that the parents directories are created
// before the child directories; a requirement for permissions. // before the child directories; a requirement for permissions.
data.SortRestoreCollections(dcs) data.SortRestoreCollections(dcs)
@ -69,13 +79,14 @@ func ConsumeRestoreCollections(
case path.LibrariesCategory: case path.LibrariesCategory:
metrics, err = onedrive.RestoreCollection( metrics, err = onedrive.RestoreCollection(
ictx, ictx,
libraryRestoreHandler{ac.Drives()}, lrh,
restoreCfg, restoreCfg,
backupVersion, backupVersion,
dc, dc,
caches, caches,
deets, deets,
opts.RestorePermissions, opts.RestorePermissions,
control.DefaultRestoreContainerName(dttm.HumanReadableDriveItem),
errs, errs,
ctr) ctr)

View File

@ -46,6 +46,17 @@ type (
) (*details.Details, error) ) (*details.Details, error)
Wait() *data.CollectionStats Wait() *data.CollectionStats
CacheItemInfoer
}
CacheItemInfoer interface {
// CacheItemInfo is used by the consumer to cache metadata that is
// sourced from per-item info, but may be valuable to the restore at
// large.
// Ex: pairing drive ids with drive names as they appeared at the time
// of backup.
CacheItemInfo(v details.ItemInfo)
} }
RepoMaintenancer interface { RepoMaintenancer interface {

View File

@ -219,7 +219,13 @@ func (op *RestoreOperation) do(
observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(bup.Selector.DiscreteOwner)) observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(bup.Selector.DiscreteOwner))
paths, err := formatDetailsForRestoration(ctx, bup.Version, op.Selectors, deets, op.Errors) paths, err := formatDetailsForRestoration(
ctx,
bup.Version,
op.Selectors,
deets,
op.rc,
op.Errors)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "formatting paths from details") return nil, clues.Wrap(err, "formatting paths from details")
} }
@ -359,6 +365,7 @@ func formatDetailsForRestoration(
backupVersion int, backupVersion int,
sel selectors.Selector, sel selectors.Selector,
deets *details.Details, deets *details.Details,
cii inject.CacheItemInfoer,
errs *fault.Bus, errs *fault.Bus,
) ([]path.RestorePaths, error) { ) ([]path.RestorePaths, error) {
fds, err := sel.Reduce(ctx, deets, errs) fds, err := sel.Reduce(ctx, deets, errs)
@ -366,6 +373,11 @@ func formatDetailsForRestoration(
return nil, err return nil, err
} }
// allow restore controllers to iterate over item metadata
for _, ent := range fds.Entries {
cii.CacheItemInfo(ent.ItemInfo)
}
paths, err := pathtransformer.GetPaths(ctx, backupVersion, fds.Items(), errs) paths, err := pathtransformer.GetPaths(ctx, backupVersion, fds.Items(), errs)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "getting restore paths") return nil, clues.Wrap(err, "getting restore paths")

View File

@ -52,8 +52,9 @@ type RestoreConfig struct {
// Defaults to "Corso_Restore_<current_dttm>" // Defaults to "Corso_Restore_<current_dttm>"
Location string Location string
// Drive specifies the drive into which the data will be restored. // Drive specifies the name of the drive into which the data will be
// If empty, data is restored to the same drive that was backed up. // restored. If empty, data is restored to the same drive that was backed
// up.
// Defaults to empty. // Defaults to empty.
Drive string Drive string
} }
@ -65,6 +66,10 @@ func DefaultRestoreConfig(timeFormat dttm.TimeFormat) RestoreConfig {
} }
} }
func DefaultRestoreContainerName(timeFormat dttm.TimeFormat) string {
return defaultRestoreLocation + dttm.FormatNow(timeFormat)
}
// EnsureRestoreConfigDefaults sets all non-supported values in the config // EnsureRestoreConfigDefaults sets all non-supported values in the config
// struct to the default value. // struct to the default value.
func EnsureRestoreConfigDefaults( func EnsureRestoreConfigDefaults(

View File

@ -0,0 +1,64 @@
package api
import (
"context"
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/graph"
)
// ---------------------------------------------------------------------------
// controller
// ---------------------------------------------------------------------------
func (c Client) Lists() Lists {
return Lists{c}
}
// Lists is an interface-compliant provider of the client.
type Lists struct {
	Client
}
// PostDrive creates a new list of type drive. Specifically used to create
// documentLibraries for SharePoint Sites.
// PostDrive creates a new list of type drive. Specifically used to create
// documentLibraries for SharePoint Sites.  The created drive is fetched
// with a follow-up query, since drive details are not part of the list
// creation response.  Returns graph.ErrItemAlreadyExistsConflict (in the
// error chain) when a list with the same name already exists.
func (c Lists) PostDrive(
	ctx context.Context,
	siteID, driveName string,
) (models.Driveable, error) {
	info := models.NewListInfo()
	info.SetTemplate(ptr.To("documentLibrary"))

	newDriveList := models.NewList()
	newDriveList.SetDisplayName(&driveName)
	newDriveList.SetDescription(ptr.To("corso auto-generated restore destination"))
	newDriveList.SetList(info)

	// creating a list of type documentLibrary will result in the creation
	// of a new drive owned by the given site.
	listsBuilder := c.Stable.
		Client().
		Sites().
		BySiteId(siteID).
		Lists()

	created, err := listsBuilder.Post(ctx, newDriveList, nil)
	if graph.IsErrItemAlreadyExistsConflict(err) {
		return nil, clues.Stack(graph.ErrItemAlreadyExistsConflict, err).WithClues(ctx)
	}

	if err != nil {
		return nil, graph.Wrap(ctx, err, "creating documentLibrary list")
	}

	// drive information is not returned by the list creation.
	drive, err := listsBuilder.
		ByListId(ptr.Val(created.GetId())).
		Drive().
		Get(ctx, nil)

	return drive, graph.Wrap(ctx, err, "fetching created documentLibrary").OrNil()
}

View File

@ -0,0 +1,57 @@
package api_test
import (
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/control/testdata"
)
// ListsAPIIntgSuite runs Lists api calls against a live M365 tenant.
type ListsAPIIntgSuite struct {
	tester.Suite
	// its holds the shared api client and resource ids for the tests.
	its intgTesterSetup
}
// SetupSuite initializes the shared api client and resource identifiers
// used by every test in the suite.
func (suite *ListsAPIIntgSuite) SetupSuite() {
	suite.its = newIntegrationTesterSetup(suite.T())
}
// TestListsAPIIntgSuite is the entry point for the integration suite;
// it only runs when M365 account credentials are configured.
func TestListsAPIIntgSuite(t *testing.T) {
	suite.Run(t, &ListsAPIIntgSuite{
		Suite: tester.NewIntegrationSuite(
			t,
			[][]string{tconfig.M365AcctCredEnvs}),
	})
}
// TestLists_PostDrive creates a documentLibrary drive on a live site,
// verifies the returned drive carries the requested name, then posts the
// same name a second time to confirm the name-conflict error surfaces.
func (suite *ListsAPIIntgSuite) TestLists_PostDrive() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	var (
		acl       = suite.its.ac.Lists()
		driveName = testdata.DefaultRestoreConfig("list_api_post_drive").Location
		siteID    = suite.its.siteID
	)

	// first post, should have no errors
	list, err := acl.PostDrive(ctx, siteID, driveName)
	require.NoError(t, err, clues.ToCore(err))
	// the site name cannot be set when posting, only its DisplayName.
	// so we double check here that we're still getting the name we expect.
	assert.Equal(t, driveName, ptr.Val(list.GetName()))

	// second post, same name, should error on name conflict
	_, err = acl.PostDrive(ctx, siteID, driveName)
	require.ErrorIs(t, err, graph.ErrItemAlreadyExistsConflict, clues.ToCore(err))
}

View File

@ -16,8 +16,6 @@ Below is a list of known Corso issues and limitations:
from M365 while a backup creation is running. from M365 while a backup creation is running.
The next backup creation will correct any missing data. The next backup creation will correct any missing data.
* SharePoint document library data can't be restored after the library has been deleted.
* Sharing information of items in OneDrive/SharePoint using sharing links aren't backed up and restored. * Sharing information of items in OneDrive/SharePoint using sharing links aren't backed up and restored.
* Permissions/Access given to a site group can't be restored. * Permissions/Access given to a site group can't be restored.