From 9359679f9967b7f0eaae245d89c96f2a3727f79a Mon Sep 17 00:00:00 2001 From: Keepers Date: Thu, 20 Jul 2023 15:05:30 -0600 Subject: [PATCH] add api funcs for creating documentLibs (#3793) Adds api handlers for creating document libraries in sharepoint. This is the first step in allowing us to restore drives that were deleted between backup and restore. --- #### Does this PR need a docs update or release note? - [x] :clock1: Yes, but in a later PR #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3562 #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- CHANGELOG.md | 4 + src/internal/common/idname/idname.go | 33 +- src/internal/data/mock/collection.go | 48 ++ src/internal/m365/controller.go | 20 + src/internal/m365/controller_test.go | 90 +++- src/internal/m365/mock/connector.go | 2 + src/internal/m365/onedrive/handlers.go | 15 +- .../m365/onedrive/item_collector_test.go | 4 +- src/internal/m365/onedrive/item_handler.go | 14 + src/internal/m365/onedrive/mock/handlers.go | 16 + src/internal/m365/onedrive/restore.go | 214 ++++++++- src/internal/m365/onedrive/restore_test.go | 436 +++++++++++++++++- src/internal/m365/restore.go | 6 + .../m365/sharepoint/library_handler.go | 36 +- src/internal/m365/sharepoint/restore.go | 19 +- src/internal/operations/inject/inject.go | 11 + src/internal/operations/restore.go | 14 +- src/pkg/control/restore.go | 9 +- src/pkg/services/m365/api/lists.go | 64 +++ src/pkg/services/m365/api/lists_test.go | 57 +++ website/docs/support/known-issues.md | 2 - 21 files changed, 1051 insertions(+), 63 deletions(-) create mode 100644 src/pkg/services/m365/api/lists.go create mode 100644 src/pkg/services/m365/api/lists_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 44a5075af..296b01a85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] (beta) +### Fixed +- SharePoint document libraries deleted after the last 
backup can now be restored. + ## [v0.11.1] (beta) - 2023-07-20 ### Fixed @@ -23,6 +26,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Return a ServiceNotEnabled error when a tenant has no active SharePoint license. - Added retries for http/2 stream connection failures when downloading large item content. +- SharePoint document libraries that were deleted after the last backup can now be restored. ### Known issues - If a link share is created for an item with inheritance disabled diff --git a/src/internal/common/idname/idname.go b/src/internal/common/idname/idname.go index d56fab025..0367d954b 100644 --- a/src/internal/common/idname/idname.go +++ b/src/internal/common/idname/idname.go @@ -40,6 +40,11 @@ type Cacher interface { ProviderForName(id string) Provider } +type CacheBuilder interface { + Add(id, name string) + Cacher +} + var _ Cacher = &cache{} type cache struct { @@ -47,17 +52,29 @@ type cache struct { nameToID map[string]string } -func NewCache(idToName map[string]string) cache { - nti := make(map[string]string, len(idToName)) - - for id, name := range idToName { - nti[name] = id +func NewCache(idToName map[string]string) *cache { + c := cache{ + idToName: map[string]string{}, + nameToID: map[string]string{}, } - return cache{ - idToName: idToName, - nameToID: nti, + if len(idToName) > 0 { + nti := make(map[string]string, len(idToName)) + + for id, name := range idToName { + nti[name] = id + } + + c.idToName = idToName + c.nameToID = nti } + + return &c +} + +func (c *cache) Add(id, name string) { + c.idToName[strings.ToLower(id)] = name + c.nameToID[strings.ToLower(name)] = id } // IDOf returns the id associated with the given name. 
diff --git a/src/internal/data/mock/collection.go b/src/internal/data/mock/collection.go index 63f2b2dd8..55f291a7f 100644 --- a/src/internal/data/mock/collection.go +++ b/src/internal/data/mock/collection.go @@ -1,12 +1,24 @@ package mock import ( + "context" "io" "time" + "github.com/alcionai/clues" + + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" ) +// --------------------------------------------------------------------------- +// stream +// --------------------------------------------------------------------------- + +var _ data.Stream = &Stream{} + type Stream struct { ID string Reader io.ReadCloser @@ -52,3 +64,39 @@ type errReader struct { func (er errReader) Read([]byte) (int, error) { return 0, er.readErr } + +// --------------------------------------------------------------------------- +// collection +// --------------------------------------------------------------------------- + +var ( + _ data.Collection = &Collection{} + _ data.BackupCollection = &Collection{} + _ data.RestoreCollection = &Collection{} +) + +type Collection struct{} + +func (c Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream { + return nil +} + +func (c Collection) FullPath() path.Path { + return nil +} + +func (c Collection) PreviousPath() path.Path { + return nil +} + +func (c Collection) State() data.CollectionState { + return data.NewState +} + +func (c Collection) DoNotMergeItems() bool { + return true +} + +func (c Collection) FetchItemByName(ctx context.Context, name string) (data.Stream, error) { + return &Stream{}, clues.New("not implemented") +} diff --git a/src/internal/m365/controller.go b/src/internal/m365/controller.go index b0c8792e5..9b037350b 100644 --- a/src/internal/m365/controller.go +++ b/src/internal/m365/controller.go @@ -14,6 +14,7 @@ import ( "github.com/alcionai/corso/src/internal/m365/support" 
"github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" @@ -47,6 +48,11 @@ type Controller struct { // mutex used to synchronize updates to `status` mu sync.Mutex status support.ControllerOperationStatus // contains the status of the last run status + + // backupDriveIDNames is populated on restore. It maps the backup's + // drive names to their id. Primarily for use when creating or looking + // up a new drive. + backupDriveIDNames idname.CacheBuilder } func NewController( @@ -142,6 +148,20 @@ func (ctrl *Controller) incrementAwaitingMessages() { ctrl.wg.Add(1) } +func (ctrl *Controller) CacheItemInfo(dii details.ItemInfo) { + if ctrl.backupDriveIDNames == nil { + ctrl.backupDriveIDNames = idname.NewCache(map[string]string{}) + } + + if dii.SharePoint != nil { + ctrl.backupDriveIDNames.Add(dii.SharePoint.DriveID, dii.SharePoint.DriveName) + } + + if dii.OneDrive != nil { + ctrl.backupDriveIDNames.Add(dii.OneDrive.DriveID, dii.OneDrive.DriveName) + } +} + // --------------------------------------------------------------------------- // Resource Lookup Handling // --------------------------------------------------------------------------- diff --git a/src/internal/m365/controller_test.go b/src/internal/m365/controller_test.go index 6d04b7e9e..ef729493b 100644 --- a/src/internal/m365/controller_test.go +++ b/src/internal/m365/controller_test.go @@ -12,8 +12,10 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/idname" inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/data" + dataMock "github.com/alcionai/corso/src/internal/data/mock" exchMock 
"github.com/alcionai/corso/src/internal/m365/exchange/mock" "github.com/alcionai/corso/src/internal/m365/mock" "github.com/alcionai/corso/src/internal/m365/resource" @@ -22,6 +24,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/count" @@ -260,6 +263,82 @@ func (suite *ControllerUnitSuite) TestController_Wait() { assert.Equal(t, int64(4), result.Bytes) } +func (suite *ControllerUnitSuite) TestController_CacheItemInfo() { + var ( + odid = "od-id" + odname = "od-name" + spid = "sp-id" + spname = "sp-name" + // intentionally declared outside the test loop + ctrl = &Controller{ + wg: &sync.WaitGroup{}, + region: &trace.Region{}, + backupDriveIDNames: idname.NewCache(nil), + } + ) + + table := []struct { + name string + service path.ServiceType + cat path.CategoryType + dii details.ItemInfo + expectID string + expectName string + }{ + { + name: "exchange", + dii: details.ItemInfo{ + Exchange: &details.ExchangeInfo{}, + }, + expectID: "", + expectName: "", + }, + { + name: "folder", + dii: details.ItemInfo{ + Folder: &details.FolderInfo{}, + }, + expectID: "", + expectName: "", + }, + { + name: "onedrive", + dii: details.ItemInfo{ + OneDrive: &details.OneDriveInfo{ + DriveID: odid, + DriveName: odname, + }, + }, + expectID: odid, + expectName: odname, + }, + { + name: "sharepoint", + dii: details.ItemInfo{ + SharePoint: &details.SharePointInfo{ + DriveID: spid, + DriveName: spname, + }, + }, + expectID: spid, + expectName: spname, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctrl.CacheItemInfo(test.dii) + + name, _ := ctrl.backupDriveIDNames.NameOf(test.expectID) + assert.Equal(t, test.expectName, name) + + id, _ := 
ctrl.backupDriveIDNames.IDOf(test.expectName) + assert.Equal(t, test.expectID, id) + }) + } +} + // --------------------------------------------------------------------------- // Integration tests // --------------------------------------------------------------------------- @@ -315,7 +394,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreFailsBadService() { RestorePermissions: true, ToggleFeatures: control.Toggles{}, }, - nil, + []data.RestoreCollection{&dataMock.Collection{}}, fault.New(true), count.New()) assert.Error(t, err, clues.ToCore(err)) @@ -397,13 +476,8 @@ func (suite *ControllerIntegrationSuite) TestEmptyCollections() { test.col, fault.New(true), count.New()) - require.NoError(t, err, clues.ToCore(err)) - assert.NotNil(t, deets) - - stats := suite.ctrl.Wait() - assert.Zero(t, stats.Objects) - assert.Zero(t, stats.Folders) - assert.Zero(t, stats.Successes) + require.Error(t, err, clues.ToCore(err)) + assert.Nil(t, deets) }) } } diff --git a/src/internal/m365/mock/connector.go b/src/internal/m365/mock/connector.go index 05cb8e159..977306883 100644 --- a/src/internal/m365/mock/connector.go +++ b/src/internal/m365/mock/connector.go @@ -69,3 +69,5 @@ func (ctrl Controller) ConsumeRestoreCollections( ) (*details.Details, error) { return ctrl.Deets, ctrl.Err } + +func (ctrl Controller) CacheItemInfo(dii details.ItemInfo) {} diff --git a/src/internal/m365/onedrive/handlers.go b/src/internal/m365/onedrive/handlers.go index dfea5ee17..cb33b373d 100644 --- a/src/internal/m365/onedrive/handlers.go +++ b/src/internal/m365/onedrive/handlers.go @@ -35,6 +35,7 @@ type BackupHandler interface { api.Getter GetItemPermissioner GetItemer + NewDrivePagerer // PathPrefix constructs the service and category specific path prefix for // the given values. @@ -49,7 +50,6 @@ type BackupHandler interface { // ServiceCat returns the service and category used by this implementation. 
ServiceCat() (path.ServiceType, path.CategoryType) - NewDrivePager(resourceOwner string, fields []string) api.DrivePager NewItemPager(driveID, link string, fields []string) api.DriveItemDeltaEnumerator // FormatDisplayPath creates a human-readable string to represent the // provided path. @@ -61,6 +61,10 @@ type BackupHandler interface { IncludesDir(dir string) bool } +type NewDrivePagerer interface { + NewDrivePager(resourceOwner string, fields []string) api.DrivePager +} + type GetItemPermissioner interface { GetItemPermission( ctx context.Context, @@ -86,7 +90,9 @@ type RestoreHandler interface { GetItemsByCollisionKeyser GetRootFolderer ItemInfoAugmenter + NewDrivePagerer NewItemContentUploader + PostDriver PostItemInContainerer DeleteItemPermissioner UpdateItemPermissioner @@ -145,6 +151,13 @@ type UpdateItemLinkSharer interface { ) (models.Permissionable, error) } +type PostDriver interface { + PostDrive( + ctx context.Context, + protectedResourceID, driveName string, + ) (models.Driveable, error) +} + type PostItemInContainerer interface { PostItemInContainer( ctx context.Context, diff --git a/src/internal/m365/onedrive/item_collector_test.go b/src/internal/m365/onedrive/item_collector_test.go index 6e4a79be6..ec2ab26af 100644 --- a/src/internal/m365/onedrive/item_collector_test.go +++ b/src/internal/m365/onedrive/item_collector_test.go @@ -361,8 +361,8 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() { Folders: folderElements, } - caches := NewRestoreCaches() - caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId()) + caches := NewRestoreCaches(nil) + caches.DriveIDToDriveInfo[driveID] = driveInfo{rootFolderID: ptr.Val(rootFolder.GetId())} rh := NewRestoreHandler(suite.ac) diff --git a/src/internal/m365/onedrive/item_handler.go b/src/internal/m365/onedrive/item_handler.go index 0b1420cf0..64701da8f 100644 --- a/src/internal/m365/onedrive/item_handler.go +++ b/src/internal/m365/onedrive/item_handler.go @@ -5,6 +5,7 @@ import ( 
"net/http" "strings" + "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" @@ -133,6 +134,19 @@ func NewRestoreHandler(ac api.Client) *itemRestoreHandler { return &itemRestoreHandler{ac.Drives()} } +func (h itemRestoreHandler) PostDrive( + context.Context, + string, string, +) (models.Driveable, error) { + return nil, clues.New("creating drives in oneDrive is not supported") +} + +func (h itemRestoreHandler) NewDrivePager( + resourceOwner string, fields []string, +) api.DrivePager { + return h.ac.NewUserDrivePager(resourceOwner, fields) +} + // AugmentItemInfo will populate a details.OneDriveInfo struct // with properties from the drive item. ItemSize is specified // separately for restore processes because the local itemable diff --git a/src/internal/m365/onedrive/mock/handlers.go b/src/internal/m365/onedrive/mock/handlers.go index 92b4573e6..75dd3c3f1 100644 --- a/src/internal/m365/onedrive/mock/handlers.go +++ b/src/internal/m365/onedrive/mock/handlers.go @@ -249,9 +249,25 @@ type RestoreHandler struct { PostItemResp models.DriveItemable PostItemErr error + DrivePagerV api.DrivePager + + PostDriveResp models.Driveable + PostDriveErr error + UploadSessionErr error } +func (h RestoreHandler) PostDrive( + ctx context.Context, + protectedResourceID, driveName string, +) (models.Driveable, error) { + return h.PostDriveResp, h.PostDriveErr +} + +func (h RestoreHandler) NewDrivePager(string, []string) api.DrivePager { + return h.DrivePagerV +} + func (h *RestoreHandler) AugmentItemInfo( details.ItemInfo, models.DriveItemable, diff --git a/src/internal/m365/onedrive/restore.go b/src/internal/m365/onedrive/restore.go index 84b8f1cd0..f951419be 100644 --- a/src/internal/m365/onedrive/restore.go +++ b/src/internal/m365/onedrive/restore.go @@ -15,6 +15,7 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" + "github.com/alcionai/corso/src/internal/common/idname" 
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" @@ -37,9 +38,17 @@ const ( maxUploadRetries = 3 ) +type driveInfo struct { + id string + name string + rootFolderID string +} + type restoreCaches struct { + BackupDriveIDName idname.Cacher collisionKeyToItemID map[string]api.DriveItemIDType - DriveIDToRootFolderID map[string]string + DriveIDToDriveInfo map[string]driveInfo + DriveNameToDriveInfo map[string]driveInfo Folders *folderCache OldLinkShareIDToNewID map[string]string OldPermIDToNewID map[string]string @@ -48,10 +57,74 @@ type restoreCaches struct { pool sync.Pool } -func NewRestoreCaches() *restoreCaches { +func (rc *restoreCaches) AddDrive( + ctx context.Context, + md models.Driveable, + grf GetRootFolderer, +) error { + di := driveInfo{ + id: ptr.Val(md.GetId()), + name: ptr.Val(md.GetName()), + } + + ctx = clues.Add(ctx, "drive_info", di) + + root, err := grf.GetRootFolder(ctx, di.id) + if err != nil { + return clues.Wrap(err, "getting drive root id") + } + + di.rootFolderID = ptr.Val(root.GetId()) + + rc.DriveIDToDriveInfo[di.id] = di + rc.DriveNameToDriveInfo[di.name] = di + + return nil +} + +// Populate looks up drive items available to the protectedResource +// and adds their info to the caches. 
+func (rc *restoreCaches) Populate( + ctx context.Context, + gdparf GetDrivePagerAndRootFolderer, + protectedResourceID string, +) error { + drives, err := api.GetAllDrives( + ctx, + gdparf.NewDrivePager(protectedResourceID, nil), + true, + maxDrivesRetries) + if err != nil { + return clues.Wrap(err, "getting drives") + } + + for _, md := range drives { + if err := rc.AddDrive(ctx, md, gdparf); err != nil { + return clues.Wrap(err, "caching drive") + } + } + + return nil +} + +type GetDrivePagerAndRootFolderer interface { + GetRootFolderer + NewDrivePagerer +} + +func NewRestoreCaches( + backupDriveIDNames idname.Cacher, +) *restoreCaches { + // avoid nil panics + if backupDriveIDNames == nil { + backupDriveIDNames = idname.NewCache(nil) + } + return &restoreCaches{ + BackupDriveIDName: backupDriveIDNames, collisionKeyToItemID: map[string]api.DriveItemIDType{}, - DriveIDToRootFolderID: map[string]string{}, + DriveIDToDriveInfo: map[string]driveInfo{}, + DriveNameToDriveInfo: map[string]driveInfo{}, Folders: NewFolderCache(), OldLinkShareIDToNewID: map[string]string{}, OldPermIDToNewID: map[string]string{}, @@ -73,19 +146,27 @@ func ConsumeRestoreCollections( backupVersion int, restoreCfg control.RestoreConfig, opts control.Options, + backupDriveIDNames idname.Cacher, dcs []data.RestoreCollection, deets *details.Builder, errs *fault.Bus, ctr *count.Bus, ) (*support.ControllerOperationStatus, error) { var ( - restoreMetrics support.CollectionMetrics - caches = NewRestoreCaches() - el = errs.Local() + restoreMetrics support.CollectionMetrics + el = errs.Local() + caches = NewRestoreCaches(backupDriveIDNames) + protectedResourceID = dcs[0].FullPath().ResourceOwner() + fallbackDriveName = restoreCfg.Location ) ctx = clues.Add(ctx, "backup_version", backupVersion) + err := caches.Populate(ctx, rh, protectedResourceID) + if err != nil { + return nil, clues.Wrap(err, "initializing restore caches") + } + // Reorder collections so that the parents directories are created // 
before the child directories; a requirement for permissions. data.SortRestoreCollections(dcs) @@ -102,7 +183,7 @@ func ConsumeRestoreCollections( ictx = clues.Add( ctx, "category", dc.FullPath().Category(), - "resource_owner", clues.Hide(dc.FullPath().ResourceOwner()), + "resource_owner", clues.Hide(protectedResourceID), "full_path", dc.FullPath()) ) @@ -115,6 +196,7 @@ func ConsumeRestoreCollections( caches, deets, opts.RestorePermissions, + fallbackDriveName, errs, ctr.Local()) if err != nil { @@ -152,18 +234,20 @@ func RestoreCollection( caches *restoreCaches, deets *details.Builder, restorePerms bool, // TODD: move into restoreConfig + fallbackDriveName string, errs *fault.Bus, ctr *count.Bus, ) (support.CollectionMetrics, error) { var ( - metrics = support.CollectionMetrics{} - directory = dc.FullPath() - el = errs.Local() - metricsObjects int64 - metricsBytes int64 - metricsSuccess int64 - wg sync.WaitGroup - complete bool + metrics = support.CollectionMetrics{} + directory = dc.FullPath() + protectedResourceID = directory.ResourceOwner() + el = errs.Local() + metricsObjects int64 + metricsBytes int64 + metricsSuccess int64 + wg sync.WaitGroup + complete bool ) ctx, end := diagnostics.Span(ctx, "gc:drive:restoreCollection", diagnostics.Label("path", directory)) @@ -174,15 +258,23 @@ func RestoreCollection( return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx) } - if _, ok := caches.DriveIDToRootFolderID[drivePath.DriveID]; !ok { - root, err := rh.GetRootFolder(ctx, drivePath.DriveID) - if err != nil { - return metrics, clues.Wrap(err, "getting drive root id") - } - - caches.DriveIDToRootFolderID[drivePath.DriveID] = ptr.Val(root.GetId()) + di, err := ensureDriveExists( + ctx, + rh, + caches, + drivePath, + protectedResourceID, + fallbackDriveName) + if err != nil { + return metrics, clues.Wrap(err, "ensuring drive exists") } + // clobber the drivePath details with the details retrieved + // in the ensure func, as they might have changed to 
reflect + // a different drive as a restore location. + drivePath.DriveID = di.id + drivePath.Root = di.rootFolderID + // Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy // from the backup under this the restore folder instead of root) // i.e. Restore into `/` @@ -704,7 +796,7 @@ func createRestoreFolders( driveID = drivePath.DriveID folders = restoreDir.Elements() location = path.Builder{}.Append(driveID) - parentFolderID = caches.DriveIDToRootFolderID[drivePath.DriveID] + parentFolderID = caches.DriveIDToDriveInfo[drivePath.DriveID].rootFolderID ) ctx = clues.Add( @@ -1113,3 +1205,79 @@ func AugmentRestorePaths( return paths, nil } + +type PostDriveAndGetRootFolderer interface { + PostDriver + GetRootFolderer +} + +// ensureDriveExists looks up the drive by its id. If no drive is found with +// that ID, a new drive is generated with the same name. If the name collides +// with an existing drive, a number is appended to the drive name. Eg: foo -> +// foo 1. This will repeat as many times as is needed. +// Returns the root folder of the drive +func ensureDriveExists( + ctx context.Context, + pdagrf PostDriveAndGetRootFolderer, + caches *restoreCaches, + drivePath *path.DrivePath, + protectedResourceID, fallbackDriveName string, +) (driveInfo, error) { + driveID := drivePath.DriveID + + // the drive might already be cached by ID. it's okay + // if the name has changed. the ID is a better reference + // anyway. + if di, ok := caches.DriveIDToDriveInfo[driveID]; ok { + return di, nil + } + + var ( + newDriveName = fallbackDriveName + newDrive models.Driveable + err error + ) + + // if the drive wasn't found by ID, maybe we can find a + // drive with the same name but different ID. 
+ // start by looking up the old drive's name + oldName, ok := caches.BackupDriveIDName.NameOf(driveID) + if ok { + // check for drives that currently have the same name + if di, ok := caches.DriveNameToDriveInfo[oldName]; ok { + return di, nil + } + + // if no current drives have the same name, we'll make + // a new drive with that name. + newDriveName = oldName + } + + nextDriveName := newDriveName + + // For sharepoint, document libraries can collide by name with + // item types beyond just drive. Lists, for example, cannot share + // names with document libraries (they're the same type, actually). + // In those cases we need to rename the drive until we can create + // one without a collision. + for i := 1; ; i++ { + ictx := clues.Add(ctx, "new_drive_name", clues.Hide(nextDriveName)) + + newDrive, err = pdagrf.PostDrive(ictx, protectedResourceID, nextDriveName) + if err != nil && !errors.Is(err, graph.ErrItemAlreadyExistsConflict) { + return driveInfo{}, clues.Wrap(err, "creating new drive") + } + + if err == nil { + break + } + + nextDriveName = fmt.Sprintf("%s %d", newDriveName, i) + } + + if err := caches.AddDrive(ctx, newDrive, pdagrf); err != nil { + return driveInfo{}, clues.Wrap(err, "adding drive to cache").OrNil() + } + + return caches.DriveIDToDriveInfo[ptr.Val(newDrive.GetId())], nil +} diff --git a/src/internal/m365/onedrive/restore_test.go b/src/internal/m365/onedrive/restore_test.go index 4128661f5..dbb7317c9 100644 --- a/src/internal/m365/onedrive/restore_test.go +++ b/src/internal/m365/onedrive/restore_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/m365/graph" odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts" @@ -21,6 +22,7 @@ import ( "github.com/alcionai/corso/src/pkg/count" "github.com/alcionai/corso/src/pkg/path" 
"github.com/alcionai/corso/src/pkg/services/m365/api" + apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) type RestoreUnitSuite struct { @@ -491,7 +493,7 @@ func (suite *RestoreUnitSuite) TestRestoreItem_collisionHandling() { mndi.SetId(ptr.To(mndiID)) var ( - caches = NewRestoreCaches() + caches = NewRestoreCaches(nil) rh = &mock.RestoreHandler{ PostItemResp: models.NewDriveItem(), DeleteItemErr: test.deleteErr, @@ -617,3 +619,435 @@ func (suite *RestoreUnitSuite) TestCreateFolder() { }) } } + +type mockGRF struct { + err error + rootFolder models.DriveItemable +} + +func (m *mockGRF) GetRootFolder( + context.Context, + string, +) (models.DriveItemable, error) { + return m.rootFolder, m.err +} + +func (suite *RestoreUnitSuite) TestRestoreCaches_AddDrive() { + rfID := "this-is-id" + driveID := "another-id" + name := "name" + + rf := models.NewDriveItem() + rf.SetId(&rfID) + + md := models.NewDrive() + md.SetId(&driveID) + md.SetName(&name) + + table := []struct { + name string + mock *mockGRF + expectErr require.ErrorAssertionFunc + expectID string + checkValues bool + }{ + { + name: "good", + mock: &mockGRF{rootFolder: rf}, + expectErr: require.NoError, + expectID: rfID, + checkValues: true, + }, + { + name: "err", + mock: &mockGRF{err: assert.AnError}, + expectErr: require.Error, + expectID: "", + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + rc := NewRestoreCaches(nil) + err := rc.AddDrive(ctx, md, test.mock) + test.expectErr(t, err, clues.ToCore(err)) + + if test.checkValues { + idResult := rc.DriveIDToDriveInfo[driveID] + assert.Equal(t, driveID, idResult.id, "drive id") + assert.Equal(t, name, idResult.name, "drive name") + assert.Equal(t, test.expectID, idResult.rootFolderID, "root folder id") + + nameResult := rc.DriveNameToDriveInfo[name] + assert.Equal(t, driveID, nameResult.id, "drive id") + assert.Equal(t, name, nameResult.name, "drive 
name") + assert.Equal(t, test.expectID, nameResult.rootFolderID, "root folder id") + } + }) + } +} + +type mockGDPARF struct { + err error + rootFolder models.DriveItemable + pager *apiMock.DrivePager +} + +func (m *mockGDPARF) GetRootFolder( + context.Context, + string, +) (models.DriveItemable, error) { + return m.rootFolder, m.err +} + +func (m *mockGDPARF) NewDrivePager( + string, + []string, +) api.DrivePager { + return m.pager +} + +func (suite *RestoreUnitSuite) TestRestoreCaches_Populate() { + rfID := "this-is-id" + driveID := "another-id" + name := "name" + + rf := models.NewDriveItem() + rf.SetId(&rfID) + + md := models.NewDrive() + md.SetId(&driveID) + md.SetName(&name) + + table := []struct { + name string + mock *apiMock.DrivePager + expectErr require.ErrorAssertionFunc + expectLen int + checkValues bool + }{ + { + name: "no results", + mock: &apiMock.DrivePager{ + ToReturn: []apiMock.PagerResult{ + {Drives: []models.Driveable{}}, + }, + }, + expectErr: require.NoError, + expectLen: 0, + }, + { + name: "one result", + mock: &apiMock.DrivePager{ + ToReturn: []apiMock.PagerResult{ + {Drives: []models.Driveable{md}}, + }, + }, + expectErr: require.NoError, + expectLen: 1, + checkValues: true, + }, + { + name: "error", + mock: &apiMock.DrivePager{ + ToReturn: []apiMock.PagerResult{ + {Err: assert.AnError}, + }, + }, + expectErr: require.Error, + expectLen: 0, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + gdparf := &mockGDPARF{ + rootFolder: rf, + pager: test.mock, + } + + rc := NewRestoreCaches(nil) + err := rc.Populate(ctx, gdparf, "shmoo") + test.expectErr(t, err, clues.ToCore(err)) + + assert.Len(t, rc.DriveIDToDriveInfo, test.expectLen) + assert.Len(t, rc.DriveNameToDriveInfo, test.expectLen) + + if test.checkValues { + idResult := rc.DriveIDToDriveInfo[driveID] + assert.Equal(t, driveID, idResult.id, "drive id") + assert.Equal(t, name, idResult.name, 
"drive name") + assert.Equal(t, rfID, idResult.rootFolderID, "root folder id") + + nameResult := rc.DriveNameToDriveInfo[name] + assert.Equal(t, driveID, nameResult.id, "drive id") + assert.Equal(t, name, nameResult.name, "drive name") + assert.Equal(t, rfID, nameResult.rootFolderID, "root folder id") + } + }) + } +} + +type mockPDAGRF struct { + i int + postResp []models.Driveable + postErr []error + + grf mockGRF +} + +func (m *mockPDAGRF) PostDrive( + ctx context.Context, + protectedResourceID, driveName string, +) (models.Driveable, error) { + defer func() { m.i++ }() + + md := m.postResp[m.i] + if md != nil { + md.SetName(&driveName) + } + + return md, m.postErr[m.i] +} + +func (m *mockPDAGRF) GetRootFolder( + ctx context.Context, + driveID string, +) (models.DriveItemable, error) { + return m.grf.rootFolder, m.grf.err +} + +func (suite *RestoreUnitSuite) TestEnsureDriveExists() { + rfID := "this-is-id" + driveID := "another-id" + oldID := "old-id" + name := "name" + otherName := "other name" + + rf := models.NewDriveItem() + rf.SetId(&rfID) + + grf := mockGRF{rootFolder: rf} + + makeMD := func() models.Driveable { + md := models.NewDrive() + md.SetId(&driveID) + md.SetName(&name) + + return md + } + + dp := &path.DrivePath{ + DriveID: driveID, + Root: "root:", + Folders: path.Elements{}, + } + + oldDP := &path.DrivePath{ + DriveID: oldID, + Root: "root:", + Folders: path.Elements{}, + } + + populatedCache := func(id string) *restoreCaches { + rc := NewRestoreCaches(nil) + di := driveInfo{ + id: id, + name: name, + } + rc.DriveIDToDriveInfo[id] = di + rc.DriveNameToDriveInfo[name] = di + + return rc + } + + oldDriveIDNames := idname.NewCache(nil) + oldDriveIDNames.Add(oldID, name) + + idSwitchedCache := func() *restoreCaches { + rc := NewRestoreCaches(oldDriveIDNames) + di := driveInfo{ + id: "diff", + name: name, + } + rc.DriveIDToDriveInfo["diff"] = di + rc.DriveNameToDriveInfo[name] = di + + return rc + } + + table := []struct { + name string + dp 
*path.DrivePath + mock *mockPDAGRF + rc *restoreCaches + expectErr require.ErrorAssertionFunc + fallbackName string + expectName string + expectID string + skipValueChecks bool + }{ + { + name: "drive already in cache", + dp: dp, + mock: &mockPDAGRF{ + postResp: []models.Driveable{makeMD()}, + postErr: []error{nil}, + grf: grf, + }, + rc: populatedCache(driveID), + expectErr: require.NoError, + fallbackName: name, + expectName: name, + expectID: driveID, + }, + { + name: "drive with same name but different id exists", + dp: oldDP, + mock: &mockPDAGRF{ + postResp: []models.Driveable{makeMD()}, + postErr: []error{nil}, + grf: grf, + }, + rc: idSwitchedCache(), + expectErr: require.NoError, + fallbackName: otherName, + expectName: name, + expectID: "diff", + }, + { + name: "drive created with old name", + dp: oldDP, + mock: &mockPDAGRF{ + postResp: []models.Driveable{makeMD()}, + postErr: []error{nil}, + grf: grf, + }, + rc: NewRestoreCaches(oldDriveIDNames), + expectErr: require.NoError, + fallbackName: otherName, + expectName: name, + expectID: driveID, + }, + { + name: "drive created with fallback name", + dp: dp, + mock: &mockPDAGRF{ + postResp: []models.Driveable{makeMD()}, + postErr: []error{nil}, + grf: grf, + }, + rc: NewRestoreCaches(nil), + expectErr: require.NoError, + fallbackName: otherName, + expectName: otherName, + expectID: driveID, + }, + { + name: "error creating drive", + dp: dp, + mock: &mockPDAGRF{ + postResp: []models.Driveable{nil}, + postErr: []error{assert.AnError}, + grf: grf, + }, + rc: NewRestoreCaches(nil), + expectErr: require.Error, + fallbackName: name, + expectName: "", + skipValueChecks: true, + expectID: driveID, + }, + { + name: "drive name already exists", + dp: dp, + mock: &mockPDAGRF{ + postResp: []models.Driveable{makeMD()}, + postErr: []error{nil}, + grf: grf, + }, + rc: populatedCache("beaux"), + expectErr: require.NoError, + fallbackName: name, + expectName: name, + expectID: driveID, + }, + { + name: "list with name already 
exists", + dp: dp, + mock: &mockPDAGRF{ + postResp: []models.Driveable{nil, makeMD()}, + postErr: []error{graph.ErrItemAlreadyExistsConflict, nil}, + grf: grf, + }, + rc: NewRestoreCaches(nil), + expectErr: require.NoError, + fallbackName: name, + expectName: name + " 1", + expectID: driveID, + }, + { + name: "list with old name already exists", + dp: oldDP, + mock: &mockPDAGRF{ + postResp: []models.Driveable{nil, makeMD()}, + postErr: []error{graph.ErrItemAlreadyExistsConflict, nil}, + grf: grf, + }, + rc: NewRestoreCaches(oldDriveIDNames), + expectErr: require.NoError, + fallbackName: name, + expectName: name + " 1", + expectID: driveID, + }, + { + name: "drive and list with name already exist", + dp: dp, + mock: &mockPDAGRF{ + postResp: []models.Driveable{nil, makeMD()}, + postErr: []error{graph.ErrItemAlreadyExistsConflict, nil}, + grf: grf, + }, + rc: populatedCache(driveID), + expectErr: require.NoError, + fallbackName: name, + expectName: name, + expectID: driveID, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + rc := test.rc + + di, err := ensureDriveExists( + ctx, + test.mock, + rc, + test.dp, + "prID", + test.fallbackName) + test.expectErr(t, err, clues.ToCore(err)) + + if !test.skipValueChecks { + assert.Equal(t, test.expectName, di.name, "ensured drive has expected name") + assert.Equal(t, test.expectID, di.id, "ensured drive has expected id") + + nameResult := rc.DriveNameToDriveInfo[test.expectName] + assert.Equal(t, test.expectName, nameResult.name, "found drive entry with expected name") + } + }) + } +} diff --git a/src/internal/m365/restore.go b/src/internal/m365/restore.go index 3c5e3e646..31e36e2bb 100644 --- a/src/internal/m365/restore.go +++ b/src/internal/m365/restore.go @@ -38,6 +38,10 @@ func (ctrl *Controller) ConsumeRestoreCollections( ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()}) ctx = clues.Add(ctx, 
"restore_config", restoreCfg) // TODO(rkeepers): needs PII control + if len(dcs) == 0 { + return nil, clues.New("no data collections to restore") + } + var ( status *support.ControllerOperationStatus deets = &details.Builder{} @@ -54,6 +58,7 @@ func (ctrl *Controller) ConsumeRestoreCollections( backupVersion, restoreCfg, opts, + ctrl.backupDriveIDNames, dcs, deets, errs, @@ -65,6 +70,7 @@ func (ctrl *Controller) ConsumeRestoreCollections( ctrl.AC, restoreCfg, opts, + ctrl.backupDriveIDNames, dcs, deets, errs, diff --git a/src/internal/m365/sharepoint/library_handler.go b/src/internal/m365/sharepoint/library_handler.go index 07c997fcb..3f16c6eae 100644 --- a/src/internal/m365/sharepoint/library_handler.go +++ b/src/internal/m365/sharepoint/library_handler.go @@ -157,11 +157,25 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool { var _ onedrive.RestoreHandler = &libraryRestoreHandler{} type libraryRestoreHandler struct { - ac api.Drives + ac api.Client +} + +func (h libraryRestoreHandler) PostDrive( + ctx context.Context, + siteID, driveName string, +) (models.Driveable, error) { + return h.ac.Lists().PostDrive(ctx, siteID, driveName) } func NewRestoreHandler(ac api.Client) *libraryRestoreHandler { - return &libraryRestoreHandler{ac.Drives()} + return &libraryRestoreHandler{ac} +} + +func (h libraryRestoreHandler) NewDrivePager( + resourceOwner string, + fields []string, +) api.DrivePager { + return h.ac.Drives().NewSiteDrivePager(resourceOwner, fields) } func (h libraryRestoreHandler) AugmentItemInfo( @@ -177,21 +191,21 @@ func (h libraryRestoreHandler) DeleteItem( ctx context.Context, driveID, itemID string, ) error { - return h.ac.DeleteItem(ctx, driveID, itemID) + return h.ac.Drives().DeleteItem(ctx, driveID, itemID) } func (h libraryRestoreHandler) DeleteItemPermission( ctx context.Context, driveID, itemID, permissionID string, ) error { - return h.ac.DeleteItemPermission(ctx, driveID, itemID, permissionID) + return 
h.ac.Drives().DeleteItemPermission(ctx, driveID, itemID, permissionID) } func (h libraryRestoreHandler) GetItemsInContainerByCollisionKey( ctx context.Context, driveID, containerID string, ) (map[string]api.DriveItemIDType, error) { - m, err := h.ac.GetItemsInContainerByCollisionKey(ctx, driveID, containerID) + m, err := h.ac.Drives().GetItemsInContainerByCollisionKey(ctx, driveID, containerID) if err != nil { return nil, err } @@ -203,7 +217,7 @@ func (h libraryRestoreHandler) NewItemContentUpload( ctx context.Context, driveID, itemID string, ) (models.UploadSessionable, error) { - return h.ac.NewItemContentUpload(ctx, driveID, itemID) + return h.ac.Drives().NewItemContentUpload(ctx, driveID, itemID) } func (h libraryRestoreHandler) PostItemPermissionUpdate( @@ -211,7 +225,7 @@ func (h libraryRestoreHandler) PostItemPermissionUpdate( driveID, itemID string, body *drives.ItemItemsItemInvitePostRequestBody, ) (drives.ItemItemsItemInviteResponseable, error) { - return h.ac.PostItemPermissionUpdate(ctx, driveID, itemID, body) + return h.ac.Drives().PostItemPermissionUpdate(ctx, driveID, itemID, body) } func (h libraryRestoreHandler) PostItemLinkShareUpdate( @@ -219,7 +233,7 @@ func (h libraryRestoreHandler) PostItemLinkShareUpdate( driveID, itemID string, body *drives.ItemItemsItemCreateLinkPostRequestBody, ) (models.Permissionable, error) { - return h.ac.PostItemLinkShareUpdate(ctx, driveID, itemID, body) + return h.ac.Drives().PostItemLinkShareUpdate(ctx, driveID, itemID, body) } func (h libraryRestoreHandler) PostItemInContainer( @@ -228,21 +242,21 @@ func (h libraryRestoreHandler) PostItemInContainer( newItem models.DriveItemable, onCollision control.CollisionPolicy, ) (models.DriveItemable, error) { - return h.ac.PostItemInContainer(ctx, driveID, parentFolderID, newItem, onCollision) + return h.ac.Drives().PostItemInContainer(ctx, driveID, parentFolderID, newItem, onCollision) } func (h libraryRestoreHandler) GetFolderByName( ctx context.Context, driveID, 
parentFolderID, folderName string, ) (models.DriveItemable, error) { - return h.ac.GetFolderByName(ctx, driveID, parentFolderID, folderName) + return h.ac.Drives().GetFolderByName(ctx, driveID, parentFolderID, folderName) } func (h libraryRestoreHandler) GetRootFolder( ctx context.Context, driveID string, ) (models.DriveItemable, error) { - return h.ac.GetRootFolder(ctx, driveID) + return h.ac.Drives().GetRootFolder(ctx, driveID) } // --------------------------------------------------------------------------- diff --git a/src/internal/m365/sharepoint/restore.go b/src/internal/m365/sharepoint/restore.go index 417d6d87c..c38b82e08 100644 --- a/src/internal/m365/sharepoint/restore.go +++ b/src/internal/m365/sharepoint/restore.go @@ -10,6 +10,8 @@ import ( "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" @@ -33,17 +35,25 @@ func ConsumeRestoreCollections( ac api.Client, restoreCfg control.RestoreConfig, opts control.Options, + backupDriveIDNames idname.Cacher, dcs []data.RestoreCollection, deets *details.Builder, errs *fault.Bus, ctr *count.Bus, ) (*support.ControllerOperationStatus, error) { var ( - restoreMetrics support.CollectionMetrics - caches = onedrive.NewRestoreCaches() - el = errs.Local() + lrh = libraryRestoreHandler{ac} + protectedResourceID = dcs[0].FullPath().ResourceOwner() + restoreMetrics support.CollectionMetrics + caches = onedrive.NewRestoreCaches(backupDriveIDNames) + el = errs.Local() ) + err := caches.Populate(ctx, lrh, protectedResourceID) + if err != nil { + return nil, clues.Wrap(err, "initializing restore caches") + } + // Reorder collections so that the parents directories are created // before the child directories; a requirement for permissions. 
data.SortRestoreCollections(dcs) @@ -69,13 +79,14 @@ func ConsumeRestoreCollections( case path.LibrariesCategory: metrics, err = onedrive.RestoreCollection( ictx, - libraryRestoreHandler{ac.Drives()}, + lrh, restoreCfg, backupVersion, dc, caches, deets, opts.RestorePermissions, + control.DefaultRestoreContainerName(dttm.HumanReadableDriveItem), errs, ctr) diff --git a/src/internal/operations/inject/inject.go b/src/internal/operations/inject/inject.go index ae2c8d534..912b46743 100644 --- a/src/internal/operations/inject/inject.go +++ b/src/internal/operations/inject/inject.go @@ -46,6 +46,17 @@ type ( ) (*details.Details, error) Wait() *data.CollectionStats + + CacheItemInfoer + } + + CacheItemInfoer interface { + // CacheItemInfo is used by the consumer to cache metadata that is + // sourced from per-item info, but may be valuable to the restore at + // large. + // Ex: pairing drive ids with drive names as they appeared at the time + // of backup. + CacheItemInfo(v details.ItemInfo) } RepoMaintenancer interface { diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index e77b1104b..0f853a853 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -219,7 +219,13 @@ func (op *RestoreOperation) do( observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(bup.Selector.DiscreteOwner)) - paths, err := formatDetailsForRestoration(ctx, bup.Version, op.Selectors, deets, op.Errors) + paths, err := formatDetailsForRestoration( + ctx, + bup.Version, + op.Selectors, + deets, + op.rc, + op.Errors) if err != nil { return nil, clues.Wrap(err, "formatting paths from details") } @@ -359,6 +365,7 @@ func formatDetailsForRestoration( backupVersion int, sel selectors.Selector, deets *details.Details, + cii inject.CacheItemInfoer, errs *fault.Bus, ) ([]path.RestorePaths, error) { fds, err := sel.Reduce(ctx, deets, errs) @@ -366,6 +373,11 @@ func formatDetailsForRestoration( return nil, err } + // allow restore 
controllers to iterate over item metadata + for _, ent := range fds.Entries { + cii.CacheItemInfo(ent.ItemInfo) + } + paths, err := pathtransformer.GetPaths(ctx, backupVersion, fds.Items(), errs) if err != nil { return nil, clues.Wrap(err, "getting restore paths") diff --git a/src/pkg/control/restore.go b/src/pkg/control/restore.go index 5fc5f7be8..2b4129d9f 100644 --- a/src/pkg/control/restore.go +++ b/src/pkg/control/restore.go @@ -52,8 +52,9 @@ type RestoreConfig struct { // Defaults to "Corso_Restore_" Location string - // Drive specifies the drive into which the data will be restored. - // If empty, data is restored to the same drive that was backed up. + // Drive specifies the name of the drive into which the data will be + // restored. If empty, data is restored to the same drive that was backed + // up. // Defaults to empty. Drive string } @@ -65,6 +66,10 @@ func DefaultRestoreConfig(timeFormat dttm.TimeFormat) RestoreConfig { } } +func DefaultRestoreContainerName(timeFormat dttm.TimeFormat) string { + return defaultRestoreLocation + dttm.FormatNow(timeFormat) +} + // EnsureRestoreConfigDefaults sets all non-supported values in the config // struct to the default value. func EnsureRestoreConfigDefaults( diff --git a/src/pkg/services/m365/api/lists.go b/src/pkg/services/m365/api/lists.go new file mode 100644 index 000000000..fb6abaa48 --- /dev/null +++ b/src/pkg/services/m365/api/lists.go @@ -0,0 +1,64 @@ +package api + +import ( + "context" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/m365/graph" +) + +// --------------------------------------------------------------------------- +// controller +// --------------------------------------------------------------------------- + +func (c Client) Lists() Lists { + return Lists{c} +} + +// Lists is an interface-compliant provider of the client. 
+type Lists struct { + Client +} + +// PostDrive creates a new list of type drive. Specifically used to create +// documentLibraries for SharePoint Sites. +func (c Lists) PostDrive( + ctx context.Context, + siteID, driveName string, +) (models.Driveable, error) { + list := models.NewList() + list.SetDisplayName(&driveName) + list.SetDescription(ptr.To("corso auto-generated restore destination")) + + li := models.NewListInfo() + li.SetTemplate(ptr.To("documentLibrary")) + list.SetList(li) + + // creating a list of type documentLibrary will result in the creation + // of a new drive owned by the given site. + builder := c.Stable. + Client(). + Sites(). + BySiteId(siteID). + Lists() + + newList, err := builder.Post(ctx, list, nil) + if graph.IsErrItemAlreadyExistsConflict(err) { + return nil, clues.Stack(graph.ErrItemAlreadyExistsConflict, err).WithClues(ctx) + } + + if err != nil { + return nil, graph.Wrap(ctx, err, "creating documentLibrary list") + } + + // drive information is not returned by the list creation. + drive, err := builder. + ByListId(ptr.Val(newList.GetId())). + Drive(). 
+ Get(ctx, nil) + + return drive, graph.Wrap(ctx, err, "fetching created documentLibrary").OrNil() +} diff --git a/src/pkg/services/m365/api/lists_test.go b/src/pkg/services/m365/api/lists_test.go new file mode 100644 index 000000000..5864427f2 --- /dev/null +++ b/src/pkg/services/m365/api/lists_test.go @@ -0,0 +1,57 @@ +package api_test + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/control/testdata" +) + +type ListsAPIIntgSuite struct { + tester.Suite + its intgTesterSetup +} + +func (suite *ListsAPIIntgSuite) SetupSuite() { + suite.its = newIntegrationTesterSetup(suite.T()) +} + +func TestListsAPIIntgSuite(t *testing.T) { + suite.Run(t, &ListsAPIIntgSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tconfig.M365AcctCredEnvs}), + }) +} + +func (suite *ListsAPIIntgSuite) TestLists_PostDrive() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + var ( + acl = suite.its.ac.Lists() + driveName = testdata.DefaultRestoreConfig("list_api_post_drive").Location + siteID = suite.its.siteID + ) + + // first post, should have no errors + list, err := acl.PostDrive(ctx, siteID, driveName) + require.NoError(t, err, clues.ToCore(err)) + // the site name cannot be set when posting, only its DisplayName. + // so we double check here that we're still getting the name we expect. 
+ assert.Equal(t, driveName, ptr.Val(list.GetName())) + + // second post, same name, should error on name conflict + _, err = acl.PostDrive(ctx, siteID, driveName) + require.ErrorIs(t, err, graph.ErrItemAlreadyExistsConflict, clues.ToCore(err)) +} diff --git a/website/docs/support/known-issues.md b/website/docs/support/known-issues.md index 754bddfb6..e6bc12809 100644 --- a/website/docs/support/known-issues.md +++ b/website/docs/support/known-issues.md @@ -16,8 +16,6 @@ Below is a list of known Corso issues and limitations: from M365 while a backup creation is running. The next backup creation will correct any missing data. -* SharePoint document library data can't be restored after the library has been deleted. - * Sharing information of items in OneDrive/SharePoint using sharing links aren't backed up and restored. * Permissions/Access given to a site group can't be restored.