renaming/cleanup for sharepoint perms pt.1 (#3330)

No logic changes, just renaming and minor cleanup.

PRs to follow:
1. collect the various maps in onedrive collections into a single cache (the restore side gets the same treatment in this PR; see the sketch below).
2. logic changes and tests to produce sharepoint permissions backup/restore e2e.
3. extend permission identity-type retention.
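
For item 1, the restore path already receives this consolidation here; the collection-side maps will follow the same shape in the next PR. From the restore.go diff below:

```go
// One struct replaces the folderMetas, permissionIDMappings, folder-cache, and
// drive-root-ID maps that were previously threaded through every restore call.
type restoreCaches struct {
	Folders               *folderCache
	ParentDirToMeta       map[string]metadata.Metadata
	OldPermIDToNewID      map[string]string
	DriveIDToRootFolderID map[string]string
}
```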

---

#### Does this PR need a docs update or release note?

- [x]  No

#### Type of change

- [x] 🧹 Tech Debt/Cleanup

#### Issue(s)

* #3135

#### Test Plan

- [x]  Unit test
Keepers 2023-05-12 15:23:52 -06:00 committed by GitHub
parent 695f8060da
commit 60f6d4a035
14 changed files with 172 additions and 146 deletions

View File

@ -52,7 +52,7 @@ corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abc
# Restore the file with ID 98765abcdef along with its associated permissions # Restore the file with ID 98765abcdef along with its associated permissions
corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --restore-permissions corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --restore-permissions
# Restore files named "FY2021 Planning.xlsx in "Documents/Finance Reports" # Restore files named "FY2021 Planning.xlsx" in "Documents/Finance Reports"
corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd \ corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd \
--file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports" --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports"

View File

@ -399,6 +399,10 @@ func (mw *MetricsMiddleware) Intercept(
status = "nil-resp" status = "nil-resp"
) )
if resp == nil {
return resp, err
}
if resp != nil { if resp != nil {
status = resp.Status status = resp.Status
} }
@ -410,11 +414,6 @@ func (mw *MetricsMiddleware) Intercept(
// track the graph "resource cost" for each call (if not provided, assume 1) // track the graph "resource cost" for each call (if not provided, assume 1)
// nil-pointer guard
if len(resp.Header) == 0 {
resp.Header = http.Header{}
}
// from msoft throttling documentation: // from msoft throttling documentation:
// x-ms-resource-unit - Indicates the resource unit used for this request. Values are positive integer // x-ms-resource-unit - Indicates the resource unit used for this request. Values are positive integer
xmru := resp.Header.Get(xmruHeader) xmru := resp.Header.Get(xmruHeader)
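
As a standalone illustration of the fallback described in that comment (the header name here is taken from the throttling docs; the middleware refers to it via the xmruHeader constant), a minimal sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// resourceCost mirrors the fallback described above: read the
// x-ms-resource-unit header and assume a cost of 1 when it is absent or not a
// positive integer. Sketch only; the real logic lives in the metrics middleware.
func resourceCost(h http.Header) int {
	if n, err := strconv.Atoi(h.Get("x-ms-resource-unit")); err == nil && n > 0 {
		return n
	}

	return 1
}

func main() {
	h := http.Header{}
	h.Set("x-ms-resource-unit", "3")

	fmt.Println(resourceCost(h), resourceCost(http.Header{})) // 3 1
}
```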

View File

@ -418,3 +418,23 @@ func GetFolderByName(
return foundItem, nil return foundItem, nil
} }
func PostItemPermissionUpdate(
ctx context.Context,
service graph.Servicer,
driveID, itemID string,
body *drive.ItemsItemInvitePostRequestBody,
) (drives.ItemItemsItemInviteResponseable, error) {
ctx = graph.ConsumeNTokens(ctx, graph.PermissionsLC)
itm, err := service.Client().
DrivesById(driveID).
ItemsById(itemID).
Invite().
Post(ctx, body, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "posting permissions")
}
return itm, nil
}
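
For reference, a rough fragment of how UpdatePermissions (further down in this diff) consumes the new helper; the recipient construction and the email/oldID values are illustrative placeholders, not the exact production code:

```go
pbody := drive.NewItemsItemInvitePostRequestBody()
pbody.SetRoles([]string{"write"})

// Illustrative recipient; the real code builds this from backed-up permission metadata.
email := "user@contoso.com"
rec := models.NewDriveRecipient()
rec.SetEmail(&email)
pbody.SetRecipients([]models.DriveRecipientable{rec})

newPerm, err := api.PostItemPermissionUpdate(ctx, service, driveID, itemID, pbody)
if err != nil {
	return clues.Stack(err)
}

// Remember the newly minted permission ID for later lookups.
oldPermIDToNewID[oldID] = ptr.Val(newPerm.GetValue()[0].GetId())
```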

View File

@ -478,7 +478,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {
ctx = clues.Add( ctx = clues.Add(
ctx, ctx,
"item_id", itemID, "item_id", itemID,
"item_name", itemName, "item_name", clues.Hide(itemName),
"item_size", itemSize) "item_size", itemSize)
item.SetParentReference(setName(item.GetParentReference(), oc.driveName)) item.SetParentReference(setName(item.GetParentReference(), oc.driveName))

View File

@ -646,7 +646,7 @@ func (c *Collections) getCollectionPath(
if item.GetParentReference() == nil || if item.GetParentReference() == nil ||
item.GetParentReference().GetPath() == nil { item.GetParentReference().GetPath() == nil {
err := clues.New("no parent reference"). err := clues.New("no parent reference").
With("item_name", ptr.Val(item.GetName())) With("item_name", clues.Hide(ptr.Val(item.GetName())))
return nil, err return nil, err
} }
@ -711,7 +711,7 @@ func (c *Collections) UpdateCollections(
var ( var (
itemID = ptr.Val(item.GetId()) itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName()) itemName = ptr.Val(item.GetName())
ictx = clues.Add(ctx, "item_id", itemID, "item_name", itemName) ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName))
isFolder = item.GetFolder() != nil || item.GetPackage() != nil isFolder = item.GetFolder() != nil || item.GetPackage() != nil
) )
@ -758,7 +758,7 @@ func (c *Collections) UpdateCollections(
// Skip items that don't match the folder selectors we were given. // Skip items that don't match the folder selectors we were given.
if shouldSkipDrive(ctx, collectionPath, c.matcher, driveName) { if shouldSkipDrive(ctx, collectionPath, c.matcher, driveName) {
logger.Ctx(ictx).Debugw("Skipping drive path", "skipped_path", collectionPath.String()) logger.Ctx(ictx).Debugw("path not selected", "skipped_path", collectionPath.String())
continue continue
} }

View File

@ -335,8 +335,16 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
restoreFolders := path.Builder{}.Append(folderElements...) restoreFolders := path.Builder{}.Append(folderElements...)
drivePath := path.DrivePath{
DriveID: driveID,
Root: "root:",
Folders: folderElements,
}
folderID, err := CreateRestoreFolders(ctx, gs, driveID, ptr.Val(rootFolder.GetId()), restoreFolders, NewFolderCache()) caches := NewRestoreCaches()
caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId())
folderID, err := CreateRestoreFolders(ctx, gs, &drivePath, restoreFolders, caches)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
folderIDs = append(folderIDs, folderID) folderIDs = append(folderIDs, folderID)
@ -344,7 +352,7 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
folderName2 := "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting) folderName2 := "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting)
restoreFolders = restoreFolders.Append(folderName2) restoreFolders = restoreFolders.Append(folderName2)
folderID, err = CreateRestoreFolders(ctx, gs, driveID, ptr.Val(rootFolder.GetId()), restoreFolders, NewFolderCache()) folderID, err = CreateRestoreFolders(ctx, gs, &drivePath, restoreFolders, caches)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
folderIDs = append(folderIDs, folderID) folderIDs = append(folderIDs, folderID)

View File

@ -283,7 +283,7 @@ func TestItemUnitTestSuite(t *testing.T) {
suite.Run(t, &ItemUnitTestSuite{Suite: tester.NewUnitSuite(t)}) suite.Run(t, &ItemUnitTestSuite{Suite: tester.NewUnitSuite(t)})
} }
func (suite *ItemUnitTestSuite) TestOneDrivePermissionsFilter() { func (suite *ItemUnitTestSuite) TestDrivePermissionsFilter() {
permID := "fakePermId" permID := "fakePermId"
userID := "fakeuser@provider.com" userID := "fakeuser@provider.com"
userID2 := "fakeuser2@provider.com" userID2 := "fakeuser2@provider.com"

View File

@ -4,11 +4,12 @@ import (
"context" "context"
"github.com/alcionai/clues" "github.com/alcionai/clues"
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive" "github.com/microsoftgraph/msgraph-sdk-go/drive"
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/internal/version"
@ -18,9 +19,9 @@ import (
func getParentMetadata( func getParentMetadata(
parentPath path.Path, parentPath path.Path,
metas map[string]metadata.Metadata, parentDirToMeta map[string]metadata.Metadata,
) (metadata.Metadata, error) { ) (metadata.Metadata, error) {
parentMeta, ok := metas[parentPath.String()] parentMeta, ok := parentDirToMeta[parentPath.String()]
if !ok { if !ok {
drivePath, err := path.ToDrivePath(parentPath) drivePath, err := path.ToDrivePath(parentPath)
if err != nil { if err != nil {
@ -41,7 +42,7 @@ func getCollectionMetadata(
ctx context.Context, ctx context.Context,
drivePath *path.DrivePath, drivePath *path.DrivePath,
dc data.RestoreCollection, dc data.RestoreCollection,
metas map[string]metadata.Metadata, caches *restoreCaches,
backupVersion int, backupVersion int,
restorePerms bool, restorePerms bool,
) (metadata.Metadata, error) { ) (metadata.Metadata, error) {
@ -60,7 +61,7 @@ func getCollectionMetadata(
} }
if backupVersion < version.OneDrive4DirIncludesPermissions { if backupVersion < version.OneDrive4DirIncludesPermissions {
colMeta, err := getParentMetadata(collectionPath, metas) colMeta, err := getParentMetadata(collectionPath, caches.ParentDirToMeta)
if err != nil { if err != nil {
return metadata.Metadata{}, clues.Wrap(err, "collection metadata") return metadata.Metadata{}, clues.Wrap(err, "collection metadata")
} }
@ -85,12 +86,13 @@ func getCollectionMetadata(
} }
// computeParentPermissions computes the parent permissions by // computeParentPermissions computes the parent permissions by
// traversing folderMetas and finding the first item with custom // traversing parentMetas and finding the first item with custom
// permissions. folderMetas is expected to have all the parent // permissions. parentMetas is expected to have all the parent
// directory metas for this to work. // directory metas for this to work.
func computeParentPermissions( func computeParentPermissions(
itemPath path.Path, originDir path.Path,
folderMetas map[string]metadata.Metadata, // map parent dir -> parent's metadata
parentMetas map[string]metadata.Metadata,
) (metadata.Metadata, error) { ) (metadata.Metadata, error) {
var ( var (
parent path.Path parent path.Path
@ -100,7 +102,7 @@ func computeParentPermissions(
ok bool ok bool
) )
parent = itemPath parent = originDir
for { for {
parent, err = parent.Dir() parent, err = parent.Dir()
@ -110,14 +112,14 @@ func computeParentPermissions(
drivePath, err := path.ToDrivePath(parent) drivePath, err := path.ToDrivePath(parent)
if err != nil { if err != nil {
return metadata.Metadata{}, clues.New("get parent path") return metadata.Metadata{}, clues.New("transforming dir to drivePath")
} }
if len(drivePath.Folders) == 0 { if len(drivePath.Folders) == 0 {
return metadata.Metadata{}, nil return metadata.Metadata{}, nil
} }
meta, ok = folderMetas[parent.String()] meta, ok = parentMetas[parent.String()]
if !ok { if !ok {
return metadata.Metadata{}, clues.New("no parent meta") return metadata.Metadata{}, clues.New("no parent meta")
} }
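
As an aside, a toy, self-contained model of that walk (plain strings instead of path.Path, a boolean in place of real metadata), just to make the lookup order concrete:

```go
package main

import (
	"errors"
	"fmt"
	gopath "path"
)

type meta struct{ custom bool }

// parentPerms walks upward from the item's directory until it finds a cached
// meta with custom permissions, reaches the drive root (nothing custom
// applies), or hits a directory with no cached meta (an error, as above).
func parentPerms(dir string, metas map[string]meta) (meta, error) {
	for ; gopath.Dir(dir) != dir; dir = gopath.Dir(dir) {
		if dir == "/drive/root:" {
			return meta{}, nil
		}

		m, ok := metas[dir]
		if !ok {
			return meta{}, errors.New("no parent meta")
		}

		if m.custom {
			return m, nil
		}
	}

	return meta{}, nil
}

func main() {
	metas := map[string]meta{
		"/drive/root:/a":     {custom: true},
		"/drive/root:/a/b":   {},
		"/drive/root:/a/b/c": {},
	}

	m, err := parentPerms("/drive/root:/a/b/c", metas)
	fmt.Println(m.custom, err) // true <nil>
}
```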
@ -137,7 +139,7 @@ func UpdatePermissions(
driveID string, driveID string,
itemID string, itemID string,
permAdded, permRemoved []metadata.Permission, permAdded, permRemoved []metadata.Permission,
permissionIDMappings map[string]string, oldPermIDToNewID map[string]string,
) error { ) error {
// The ordering of the operations is important here. We first // The ordering of the operations is important here. We first
// remove all the removed permissions and then add the added ones. // remove all the removed permissions and then add the added ones.
@ -151,7 +153,7 @@ func UpdatePermissions(
return graph.Wrap(ctx, err, "creating delete client") return graph.Wrap(ctx, err, "creating delete client")
} }
pid, ok := permissionIDMappings[p.ID] pid, ok := oldPermIDToNewID[p.ID]
if !ok { if !ok {
return clues.New("no new permission id").WithClues(ctx) return clues.New("no new permission id").WithClues(ctx)
} }
@ -182,7 +184,7 @@ func UpdatePermissions(
continue continue
} }
pbody := msdrive.NewItemsItemInvitePostRequestBody() pbody := drive.NewItemsItemInvitePostRequestBody()
pbody.SetRoles(roles) pbody.SetRoles(roles)
if p.Expiration != nil { if p.Expiration != nil {
@ -207,16 +209,12 @@ func UpdatePermissions(
pbody.SetRecipients([]models.DriveRecipientable{rec}) pbody.SetRecipients([]models.DriveRecipientable{rec})
np, err := service.Client(). newPerm, err := api.PostItemPermissionUpdate(ctx, service, driveID, itemID, pbody)
DrivesById(driveID).
ItemsById(itemID).
Invite().
Post(graph.ConsumeNTokens(ctx, graph.PermissionsLC), pbody, nil)
if err != nil { if err != nil {
return graph.Wrap(ctx, err, "setting permissions") return clues.Stack(err)
} }
permissionIDMappings[p.ID] = ptr.Val(np.GetValue()[0].GetId()) oldPermIDToNewID[p.ID] = ptr.Val(newPerm.GetValue()[0].GetId())
} }
return nil return nil
@ -233,22 +231,29 @@ func RestorePermissions(
driveID string, driveID string,
itemID string, itemID string,
itemPath path.Path, itemPath path.Path,
meta metadata.Metadata, current metadata.Metadata,
folderMetas map[string]metadata.Metadata, caches *restoreCaches,
permissionIDMappings map[string]string,
) error { ) error {
if meta.SharingMode == metadata.SharingModeInherited { if current.SharingMode == metadata.SharingModeInherited {
return nil return nil
} }
ctx = clues.Add(ctx, "permission_item_id", itemID) ctx = clues.Add(ctx, "permission_item_id", itemID)
parentPermissions, err := computeParentPermissions(itemPath, folderMetas) parents, err := computeParentPermissions(itemPath, caches.ParentDirToMeta)
if err != nil { if err != nil {
return clues.Wrap(err, "parent permissions").WithClues(ctx) return clues.Wrap(err, "parent permissions").WithClues(ctx)
} }
permAdded, permRemoved := metadata.DiffPermissions(parentPermissions.Permissions, meta.Permissions) permAdded, permRemoved := metadata.DiffPermissions(parents.Permissions, current.Permissions)
return UpdatePermissions(ctx, creds, service, driveID, itemID, permAdded, permRemoved, permissionIDMappings) return UpdatePermissions(
ctx,
creds,
service,
driveID,
itemID,
permAdded,
permRemoved,
caches.OldPermIDToNewID)
} }

View File

@ -33,6 +33,22 @@ import (
// https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession?view=graph-rest-1.0#best-practices // https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession?view=graph-rest-1.0#best-practices
const copyBufferSize = 5 * 1024 * 1024 const copyBufferSize = 5 * 1024 * 1024
type restoreCaches struct {
Folders *folderCache
ParentDirToMeta map[string]metadata.Metadata
OldPermIDToNewID map[string]string
DriveIDToRootFolderID map[string]string
}
func NewRestoreCaches() *restoreCaches {
return &restoreCaches{
Folders: NewFolderCache(),
ParentDirToMeta: map[string]metadata.Metadata{},
OldPermIDToNewID: map[string]string{},
DriveIDToRootFolderID: map[string]string{},
}
}
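
Pieced together from the hunks below, the cache fields fill in roughly like this during a restore (fragment; oldID/newID are placeholders):

```go
caches := NewRestoreCaches()

// Root folder IDs resolve lazily, once per drive.
if _, ok := caches.DriveIDToRootFolderID[driveID]; !ok {
	root, err := api.GetDriveRoot(ctx, service, driveID)
	if err != nil {
		return metrics, clues.Wrap(err, "getting drive root id")
	}

	caches.DriveIDToRootFolderID[driveID] = ptr.Val(root.GetId())
}

// Each restored collection registers its folder metadata for child lookups...
caches.ParentDirToMeta[dc.FullPath().String()] = colMeta

// ...and permission restores record old -> new permission IDs as they are minted.
caches.OldPermIDToNewID[oldID] = newID
```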
// RestoreCollections will restore the specified data collections into OneDrive // RestoreCollections will restore the specified data collections into OneDrive
func RestoreCollections( func RestoreCollections(
ctx context.Context, ctx context.Context,
@ -47,14 +63,8 @@ func RestoreCollections(
) (*support.ConnectorOperationStatus, error) { ) (*support.ConnectorOperationStatus, error) {
var ( var (
restoreMetrics support.CollectionMetrics restoreMetrics support.CollectionMetrics
metrics support.CollectionMetrics caches = NewRestoreCaches()
folderMetas = map[string]metadata.Metadata{} el = errs.Local()
// permissionIDMappings is used to map between old and new id
// of permissions as we restore them
permissionIDMappings = map[string]string{}
fc = NewFolderCache()
rootIDCache = map[string]string{}
) )
ctx = clues.Add( ctx = clues.Add(
@ -68,8 +78,6 @@ func RestoreCollections(
return dcs[i].FullPath().String() < dcs[j].FullPath().String() return dcs[i].FullPath().String() < dcs[j].FullPath().String()
}) })
el := errs.Local()
// Iterate through the data collections and restore the contents of each // Iterate through the data collections and restore the contents of each
for _, dc := range dcs { for _, dc := range dcs {
if el.Failure() != nil { if el.Failure() != nil {
@ -77,8 +85,9 @@ func RestoreCollections(
} }
var ( var (
err error err error
ictx = clues.Add( metrics support.CollectionMetrics
ictx = clues.Add(
ctx, ctx,
"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()), "resource_owner", clues.Hide(dc.FullPath().ResourceOwner()),
"category", dc.FullPath().Category(), "category", dc.FullPath().Category(),
@ -91,10 +100,7 @@ func RestoreCollections(
backupVersion, backupVersion,
service, service,
dc, dc,
folderMetas, caches,
permissionIDMappings,
fc,
rootIDCache,
OneDriveSource, OneDriveSource,
dest.ContainerName, dest.ContainerName,
deets, deets,
@ -132,10 +138,7 @@ func RestoreCollection(
backupVersion int, backupVersion int,
service graph.Servicer, service graph.Servicer,
dc data.RestoreCollection, dc data.RestoreCollection,
folderMetas map[string]metadata.Metadata, caches *restoreCaches,
permissionIDMappings map[string]string,
fc *folderCache,
rootIDCache map[string]string, // map of drive id -> root folder ID
source driveSource, source driveSource,
restoreContainerName string, restoreContainerName string,
deets *details.Builder, deets *details.Builder,
@ -157,17 +160,13 @@ func RestoreCollection(
return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx) return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx)
} }
if rootIDCache == nil { if _, ok := caches.DriveIDToRootFolderID[drivePath.DriveID]; !ok {
rootIDCache = map[string]string{}
}
if _, ok := rootIDCache[drivePath.DriveID]; !ok {
root, err := api.GetDriveRoot(ctx, service, drivePath.DriveID) root, err := api.GetDriveRoot(ctx, service, drivePath.DriveID)
if err != nil { if err != nil {
return metrics, clues.Wrap(err, "getting drive root id") return metrics, clues.Wrap(err, "getting drive root id")
} }
rootIDCache[drivePath.DriveID] = ptr.Val(root.GetId()) caches.DriveIDToRootFolderID[drivePath.DriveID] = ptr.Val(root.GetId())
} }
// Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy // Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy
@ -189,7 +188,7 @@ func RestoreCollection(
ctx, ctx,
drivePath, drivePath,
dc, dc,
folderMetas, caches,
backupVersion, backupVersion,
restorePerms) restorePerms)
if err != nil { if err != nil {
@ -202,19 +201,16 @@ func RestoreCollection(
creds, creds,
service, service,
drivePath, drivePath,
rootIDCache[drivePath.DriveID],
restoreFolderElements, restoreFolderElements,
dc.FullPath(), dc.FullPath(),
colMeta, colMeta,
folderMetas, caches,
fc,
permissionIDMappings,
restorePerms) restorePerms)
if err != nil { if err != nil {
return metrics, clues.Wrap(err, "creating folders for restore") return metrics, clues.Wrap(err, "creating folders for restore")
} }
folderMetas[dc.FullPath().String()] = colMeta caches.ParentDirToMeta[dc.FullPath().String()] = colMeta
items := dc.Items(ctx, errs) items := dc.Items(ctx, errs)
for { for {
@ -231,14 +227,16 @@ func RestoreCollection(
return metrics, nil return metrics, nil
} }
ictx := clues.Add(ctx, "restore_item_id", itemData.UUID())
itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ictx))
continue continue
} }
itemInfo, skipped, err := restoreItem( itemInfo, skipped, err := restoreItem(
ctx, ictx,
creds, creds,
dc, dc,
backupVersion, backupVersion,
@ -247,8 +245,7 @@ func RestoreCollection(
drivePath, drivePath,
restoreFolderID, restoreFolderID,
copyBuffer, copyBuffer,
folderMetas, caches,
permissionIDMappings,
restorePerms, restorePerms,
itemData, itemData,
itemPath) itemPath)
@ -265,7 +262,7 @@ func RestoreCollection(
} }
if skipped { if skipped {
logger.Ctx(ctx).With("item_path", itemPath).Debug("did not restore item") logger.Ctx(ictx).With("item_path", itemPath).Debug("did not restore item")
continue continue
} }
@ -276,7 +273,7 @@ func RestoreCollection(
itemInfo) itemInfo)
if err != nil { if err != nil {
// Not critical enough to need to stop restore operation. // Not critical enough to need to stop restore operation.
logger.CtxErr(ctx, err).Infow("adding restored item to details") logger.CtxErr(ictx, err).Infow("adding restored item to details")
} }
metrics.Successes++ metrics.Successes++
@ -298,8 +295,7 @@ func restoreItem(
drivePath *path.DrivePath, drivePath *path.DrivePath,
restoreFolderID string, restoreFolderID string,
copyBuffer []byte, copyBuffer []byte,
folderMetas map[string]metadata.Metadata, caches *restoreCaches,
permissionIDMappings map[string]string,
restorePerms bool, restorePerms bool,
itemData data.Stream, itemData data.Stream,
itemPath path.Path, itemPath path.Path,
@ -348,7 +344,7 @@ func restoreItem(
} }
trimmedPath := strings.TrimSuffix(itemPath.String(), metadata.DirMetaFileSuffix) trimmedPath := strings.TrimSuffix(itemPath.String(), metadata.DirMetaFileSuffix)
folderMetas[trimmedPath] = meta caches.ParentDirToMeta[trimmedPath] = meta
return details.ItemInfo{}, true, nil return details.ItemInfo{}, true, nil
} }
@ -366,8 +362,7 @@ func restoreItem(
restoreFolderID, restoreFolderID,
copyBuffer, copyBuffer,
restorePerms, restorePerms,
folderMetas, caches,
permissionIDMappings,
itemPath, itemPath,
itemData) itemData)
if err != nil { if err != nil {
@ -389,8 +384,7 @@ func restoreItem(
restoreFolderID, restoreFolderID,
copyBuffer, copyBuffer,
restorePerms, restorePerms,
folderMetas, caches,
permissionIDMappings,
itemPath, itemPath,
itemData) itemData)
if err != nil { if err != nil {
@ -439,8 +433,7 @@ func restoreV1File(
restoreFolderID string, restoreFolderID string,
copyBuffer []byte, copyBuffer []byte,
restorePerms bool, restorePerms bool,
folderMetas map[string]metadata.Metadata, caches *restoreCaches,
permissionIDMappings map[string]string,
itemPath path.Path, itemPath path.Path,
itemData data.Stream, itemData data.Stream,
) (details.ItemInfo, error) { ) (details.ItemInfo, error) {
@ -481,8 +474,7 @@ func restoreV1File(
itemID, itemID,
itemPath, itemPath,
meta, meta,
folderMetas, caches)
permissionIDMappings)
if err != nil { if err != nil {
return details.ItemInfo{}, clues.Wrap(err, "restoring item permissions") return details.ItemInfo{}, clues.Wrap(err, "restoring item permissions")
} }
@ -500,8 +492,7 @@ func restoreV6File(
restoreFolderID string, restoreFolderID string,
copyBuffer []byte, copyBuffer []byte,
restorePerms bool, restorePerms bool,
folderMetas map[string]metadata.Metadata, caches *restoreCaches,
permissionIDMappings map[string]string,
itemPath path.Path, itemPath path.Path,
itemData data.Stream, itemData data.Stream,
) (details.ItemInfo, error) { ) (details.ItemInfo, error) {
@ -515,6 +506,11 @@ func restoreV6File(
return details.ItemInfo{}, clues.Wrap(err, "restoring file") return details.ItemInfo{}, clues.Wrap(err, "restoring file")
} }
ctx = clues.Add(
ctx,
"count_perms", len(meta.Permissions),
"restore_item_name", clues.Hide(meta.FileName))
if err != nil { if err != nil {
return details.ItemInfo{}, clues.Wrap(err, "deserializing item metadata") return details.ItemInfo{}, clues.Wrap(err, "deserializing item metadata")
} }
@ -553,8 +549,7 @@ func restoreV6File(
itemID, itemID,
itemPath, itemPath,
meta, meta,
folderMetas, caches)
permissionIDMappings)
if err != nil { if err != nil {
return details.ItemInfo{}, clues.Wrap(err, "restoring item permissions") return details.ItemInfo{}, clues.Wrap(err, "restoring item permissions")
} }
@ -572,22 +567,18 @@ func createRestoreFoldersWithPermissions(
creds account.M365Config, creds account.M365Config,
service graph.Servicer, service graph.Servicer,
drivePath *path.DrivePath, drivePath *path.DrivePath,
driveRootID string,
restoreFolders *path.Builder, restoreFolders *path.Builder,
folderPath path.Path, folderPath path.Path,
folderMetadata metadata.Metadata, folderMetadata metadata.Metadata,
folderMetas map[string]metadata.Metadata, caches *restoreCaches,
fc *folderCache,
permissionIDMappings map[string]string,
restorePerms bool, restorePerms bool,
) (string, error) { ) (string, error) {
id, err := CreateRestoreFolders( id, err := CreateRestoreFolders(
ctx, ctx,
service, service,
drivePath.DriveID, drivePath,
driveRootID,
restoreFolders, restoreFolders,
fc) caches)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -609,8 +600,7 @@ func createRestoreFoldersWithPermissions(
id, id,
folderPath, folderPath,
folderMetadata, folderMetadata,
folderMetas, caches)
permissionIDMappings)
return id, err return id, err
} }
@ -621,16 +611,22 @@ func createRestoreFoldersWithPermissions(
func CreateRestoreFolders( func CreateRestoreFolders(
ctx context.Context, ctx context.Context,
service graph.Servicer, service graph.Servicer,
driveID, driveRootID string, drivePath *path.DrivePath,
restoreFolders *path.Builder, restoreDir *path.Builder,
fc *folderCache, caches *restoreCaches,
) (string, error) { ) (string, error) {
var ( var (
location = &path.Builder{} driveID = drivePath.DriveID
parentFolderID = driveRootID folders = restoreDir.Elements()
folders = restoreFolders.Elements() location = path.Builder{}.Append(driveID)
parentFolderID = caches.DriveIDToRootFolderID[drivePath.DriveID]
) )
ctx = clues.Add(
ctx,
"drive_id", drivePath.DriveID,
"root_folder_id", parentFolderID)
for _, folder := range folders { for _, folder := range folders {
location = location.Append(folder) location = location.Append(folder)
ictx := clues.Add( ictx := clues.Add(
@ -639,7 +635,7 @@ func CreateRestoreFolders(
"restore_folder_location", location, "restore_folder_location", location,
"parent_of_restore_folder", parentFolderID) "parent_of_restore_folder", parentFolderID)
if fl, ok := fc.get(location); ok { if fl, ok := caches.Folders.get(location); ok {
parentFolderID = ptr.Val(fl.GetId()) parentFolderID = ptr.Val(fl.GetId())
// folder was already created, move on to the child // folder was already created, move on to the child
continue continue
@ -647,27 +643,27 @@ func CreateRestoreFolders(
folderItem, err := api.GetFolderByName(ictx, service, driveID, parentFolderID, folder) folderItem, err := api.GetFolderByName(ictx, service, driveID, parentFolderID, folder)
if err != nil && !errors.Is(err, api.ErrFolderNotFound) { if err != nil && !errors.Is(err, api.ErrFolderNotFound) {
return "", clues.Wrap(err, "getting folder by display name").WithClues(ctx) return "", clues.Wrap(err, "getting folder by display name")
} }
// folder found, moving to next child // folder found, moving to next child
if err == nil { if err == nil {
parentFolderID = ptr.Val(folderItem.GetId()) parentFolderID = ptr.Val(folderItem.GetId())
fc.set(location, folderItem) caches.Folders.set(location, folderItem)
continue continue
} }
// create the folder if not found // create the folder if not found
folderItem, err = CreateItem(ctx, service, driveID, parentFolderID, newItem(folder, true)) folderItem, err = CreateItem(ictx, service, driveID, parentFolderID, newItem(folder, true))
if err != nil { if err != nil {
return "", clues.Wrap(err, "creating folder") return "", clues.Wrap(err, "creating folder")
} }
parentFolderID = ptr.Val(folderItem.GetId()) parentFolderID = ptr.Val(folderItem.GetId())
fc.set(location, folderItem) caches.Folders.set(location, folderItem)
logger.Ctx(ctx).Debug("resolved restore destination") logger.Ctx(ictx).Debug("resolved restore destination")
} }
return parentFolderID, nil return parentFolderID, nil
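
The integration test earlier in this diff drives the new signature; condensed, the calling pattern is roughly (folder names are illustrative):

```go
drivePath := path.DrivePath{
	DriveID: driveID,
	Root:    "root:",
	Folders: []string{"Documents", "Finance Reports"},
}

caches := NewRestoreCaches()
caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId())

restoreDir := path.Builder{}.Append(drivePath.Folders...)

folderID, err := CreateRestoreFolders(ctx, gs, &drivePath, restoreDir, caches)
```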
@ -686,10 +682,7 @@ func restoreData(
ctx, end := diagnostics.Span(ctx, "gc:oneDrive:restoreItem", diagnostics.Label("item_uuid", itemData.UUID())) ctx, end := diagnostics.Span(ctx, "gc:oneDrive:restoreItem", diagnostics.Label("item_uuid", itemData.UUID()))
defer end() defer end()
ctx = clues.Add(ctx, "item_name", itemData.UUID()) trace.Log(ctx, "gc:oneDrive:restoreItem", itemData.UUID())
itemName := itemData.UUID()
trace.Log(ctx, "gc:oneDrive:restoreItem", itemName)
// Get the stream size (needed to create the upload session) // Get the stream size (needed to create the upload session)
ss, ok := itemData.(data.StreamSize) ss, ok := itemData.(data.StreamSize)
@ -700,13 +693,13 @@ func restoreData(
// Create Item // Create Item
newItem, err := CreateItem(ctx, service, driveID, parentFolderID, newItem(name, false)) newItem, err := CreateItem(ctx, service, driveID, parentFolderID, newItem(name, false))
if err != nil { if err != nil {
return "", details.ItemInfo{}, clues.Wrap(err, "creating item") return "", details.ItemInfo{}, err
} }
// Get a drive item writer // Get a drive item writer
w, err := driveItemWriter(ctx, service, driveID, ptr.Val(newItem.GetId()), ss.Size()) w, err := driveItemWriter(ctx, service, driveID, ptr.Val(newItem.GetId()), ss.Size())
if err != nil { if err != nil {
return "", details.ItemInfo{}, clues.Wrap(err, "creating item writer") return "", details.ItemInfo{}, err
} }
iReader := itemData.ToReader() iReader := itemData.ToReader()

View File

@ -23,6 +23,8 @@ func (ms *MockGraphService) Adapter() *msgraphsdk.GraphRequestAdapter {
return nil return nil
} }
var _ graph.Servicer = &oneDriveService{}
// TODO(ashmrtn): Merge with similar structs in graph and exchange packages. // TODO(ashmrtn): Merge with similar structs in graph and exchange packages.
type oneDriveService struct { type oneDriveService struct {
client msgraphsdk.GraphServiceClient client msgraphsdk.GraphServiceClient

View File

@ -2,6 +2,7 @@ package sharepoint
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"runtime/trace" "runtime/trace"
@ -12,7 +13,6 @@ import (
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/connector/onedrive"
"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
"github.com/alcionai/corso/src/internal/connector/sharepoint/api" "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
"github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
@ -51,20 +51,25 @@ func RestoreCollections(
errs *fault.Bus, errs *fault.Bus,
) (*support.ConnectorOperationStatus, error) { ) (*support.ConnectorOperationStatus, error) {
var ( var (
err error
restoreMetrics support.CollectionMetrics restoreMetrics support.CollectionMetrics
caches = onedrive.NewRestoreCaches()
el = errs.Local()
) )
// Iterate through the data collections and restore the contents of each // Iterate through the data collections and restore the contents of each
for _, dc := range dcs { for _, dc := range dcs {
if el.Failure() != nil {
break
}
var ( var (
err error
category = dc.FullPath().Category() category = dc.FullPath().Category()
metrics support.CollectionMetrics metrics support.CollectionMetrics
ictx = clues.Add(ctx, ictx = clues.Add(ctx,
"category", category, "category", category,
"destination", clues.Hide(dest.ContainerName), "destination", clues.Hide(dest.ContainerName),
"resource_owner", clues.Hide(dc.FullPath().ResourceOwner())) "resource_owner", clues.Hide(dc.FullPath().ResourceOwner()))
driveFolderCache = onedrive.NewFolderCache()
) )
switch dc.FullPath().Category() { switch dc.FullPath().Category() {
@ -75,10 +80,7 @@ func RestoreCollections(
backupVersion, backupVersion,
service, service,
dc, dc,
map[string]metadata.Metadata{}, // Currently permission data is not stored for sharepoint caches,
map[string]string{},
driveFolderCache,
nil,
onedrive.SharePointSource, onedrive.SharePointSource,
dest.ContainerName, dest.ContainerName,
deets, deets,
@ -110,6 +112,10 @@ func RestoreCollections(
restoreMetrics = support.CombineMetrics(restoreMetrics, metrics) restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)
if err != nil { if err != nil {
el.AddRecoverable(err)
}
if errors.Is(err, context.Canceled) {
break break
} }
} }
@ -121,7 +127,7 @@ func RestoreCollections(
restoreMetrics, restoreMetrics,
dest.ContainerName) dest.ContainerName)
return status, err return status, el.Failure()
} }
// restoreListItem utility function restores a List to the siteID. // restoreListItem utility function restores a List to the siteID.
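
Stepping back from the hunk above: the SharePoint restore loop now mirrors the OneDrive side's fault handling. Schematically (restoreOne is a hypothetical stand-in for the per-category switch):

```go
el := errs.Local()

for _, dc := range dcs {
	// Stop handing out work once a non-recoverable failure is recorded.
	if el.Failure() != nil {
		break
	}

	metrics, err := restoreOne(ctx, dc)
	restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)

	if err != nil {
		el.AddRecoverable(err)
	}

	// Cancellation still short-circuits the whole restore.
	if errors.Is(err, context.Canceled) {
		break
	}
}

return status, el.Failure()
```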

View File

@ -199,9 +199,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
op.Results.BackupID) op.Results.BackupID)
if err != nil { if err != nil {
// No return here! We continue down to persistResults, even in case of failure. // No return here! We continue down to persistResults, even in case of failure.
logger.Ctx(ctx). logger.CtxErr(ctx, err).Error("running backup")
With("err", err).
Errorw("running backup", clues.InErr(err).Slice()...)
op.Errors.Fail(clues.Wrap(err, "running backup")) op.Errors.Fail(clues.Wrap(err, "running backup"))
} }

View File

@ -1605,9 +1605,8 @@ func runDriveIncrementalTest(
*newFile.GetId(), *newFile.GetId(),
[]metadata.Permission{}, []metadata.Permission{},
[]metadata.Permission{writePerm}, []metadata.Permission{writePerm},
permissionIDMappings, permissionIDMappings)
) require.NoErrorf(t, err, "removing permission from file %v", clues.ToCore(err))
require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked // no expectedDeets: metadata isn't tracked
}, },
itemsRead: 1, // .data file for newitem itemsRead: 1, // .data file for newitem
@ -1629,10 +1628,9 @@ func runDriveIncrementalTest(
targetContainer, targetContainer,
[]metadata.Permission{writePerm}, []metadata.Permission{writePerm},
[]metadata.Permission{}, []metadata.Permission{},
permissionIDMappings, permissionIDMappings)
) require.NoErrorf(t, err, "adding permission to container %v", clues.ToCore(err))
require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err)) // no expectedDeets: metadata isn't tracked
// no expectedDeets: metadata isn't tracked5tgb
}, },
itemsRead: 0, itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection itemsWritten: 1, // .dirmeta for collection
@ -1653,9 +1651,8 @@ func runDriveIncrementalTest(
targetContainer, targetContainer,
[]metadata.Permission{}, []metadata.Permission{},
[]metadata.Permission{writePerm}, []metadata.Permission{writePerm},
permissionIDMappings, permissionIDMappings)
) require.NoErrorf(t, err, "removing permission from container %v", clues.ToCore(err))
require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked // no expectedDeets: metadata isn't tracked
}, },
itemsRead: 0, itemsRead: 0,

View File

@ -166,9 +166,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
deets, err := op.do(ctx, &opStats, sstore, start) deets, err := op.do(ctx, &opStats, sstore, start)
if err != nil { if err != nil {
// No return here! We continue down to persistResults, even in case of failure. // No return here! We continue down to persistResults, even in case of failure.
logger.Ctx(ctx). logger.CtxErr(ctx, err).Error("running restore")
With("err", err).
Errorw("running restore", clues.InErr(err).Slice()...)
op.Errors.Fail(clues.Wrap(err, "running restore")) op.Errors.Fail(clues.Wrap(err, "running restore"))
} }