logic change for sharepoint permission support (#3333)

adds logical changes required for sharepoint permission handling, both backup and restore.

---

#### Does this PR need a docs update or release note?

- [x]  Yes, it's included

#### Type of change

- [x] 🌻 Feature

#### Issue(s)

* #3135

#### Test Plan

- [x] 💪 Manual
- [x]  Unit test
- [x] 💚 E2E
This commit is contained in:
Keepers 2023-05-15 15:03:00 -06:00 committed by GitHub
parent 451041c147
commit 97ed97c1c3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 598 additions and 276 deletions

View File

@ -3,6 +3,7 @@ on:
push:
branches:
- main
- 3135-logic
workflow_dispatch:
inputs:
user:
@ -35,11 +36,12 @@ jobs:
CORSO_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }}
CORSO_LOG_DIR: testlog
CORSO_LOG_FILE: testlog/testlogging.log
CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
RESTORE_DEST_PFX: Corso_Test_Sanity_
TEST_RESULT: test_results
TEST_USER: ${{ github.event.inputs.user != '' && github.event.inputs.user || secrets.CORSO_M365_TEST_USER_ID }}
TEST_SITE: ${{ secrets.CORSO_M365_TEST_SITE_URL }}
SECONDARY_TEST_USER : ${{ secrets.CORSO_SECONDARY_M365_TEST_USER_ID }}
CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
TEST_RESULT: test_results
# The default working directory doesn't seem to apply to things without
# the 'run' directive. https://stackoverflow.com/a/67845456
WORKING_DIR: src
@ -105,15 +107,11 @@ jobs:
# only runs if the test was successful
- name: Exchange - Create new data
working-directory: ./src/cmd/factory
env:
AZURE_CLIENT_ID: ${{ secrets[needs.SetM365App.outputs.client_id_env] }}
AZURE_CLIENT_SECRET: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }}
AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
run: |
go run . exchange emails \
--user ${TEST_USER} \
--tenant ${{ env.AZURE_TENANT_ID }} \
--destination Corso_Test_Sanity_${{ steps.repo-init.outputs.result }} \
--tenant ${AZURE_TENANT_ID} \
--destination ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }} \
--count 4
- name: Exchange - Backup
@ -123,8 +121,8 @@ jobs:
service: exchange
kind: backup
backup-args: '--mailbox "${TEST_USER}" --data "email"'
restore-args: '--email-folder Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
test-folder: 'Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
- name: Exchange - Incremental backup
id: exchange-backup-incremental
@ -133,8 +131,8 @@ jobs:
service: exchange
kind: backup-incremental
backup-args: '--mailbox "${TEST_USER}" --data "email"'
restore-args: '--email-folder Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
test-folder: 'Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
base-backup: ${{ steps.exchange-backup.outputs.backup-id }}
- name: Exchange - Non delta backup
@ -144,8 +142,8 @@ jobs:
service: exchange
kind: backup-non-delta
backup-args: '--mailbox "${TEST_USER}" --data "email" --disable-delta'
restore-args: '--email-folder Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
test-folder: 'Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
base-backup: ${{ steps.exchange-backup.outputs.backup-id }}
- name: Exchange - Incremental backup after non-delta
@ -155,8 +153,8 @@ jobs:
service: exchange
kind: backup-incremental-after-non-delta
backup-args: '--mailbox "${TEST_USER}" --data "email"'
restore-args: '--email-folder Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
test-folder: 'Corso_Test_Sanity_${{ steps.repo-init.outputs.result }}'
restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
base-backup: ${{ steps.exchange-backup.outputs.backup-id }}
@ -168,21 +166,17 @@ jobs:
- name: OneDrive - Create new data
id: new-data-creation-onedrive
working-directory: ./src/cmd/factory
env:
AZURE_CLIENT_ID: ${{ secrets[needs.SetM365App.outputs.client_id_env] }}
AZURE_CLIENT_SECRET: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }}
AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
run: |
suffix=$(date +"%Y-%m-%d_%H-%M")
suffix=$(date +"%Y-%m-%d_%H-%M-%S")
go run . onedrive files \
--user ${TEST_USER} \
--secondaryuser ${{ env.SECONDARY_TEST_USER }} \
--tenant ${{ env.AZURE_TENANT_ID }} \
--destination Corso_Test_Sanity_$suffix \
--secondaryuser ${SECONDARY_TEST_USER} \
--tenant ${AZURE_TENANT_ID} \
--destination ${RESTORE_DEST_PFX}$suffix \
--count 4
echo result="$suffix" >> $GITHUB_OUTPUT
echo result="${suffix}" >> $GITHUB_OUTPUT
- name: OneDrive - Backup
id: onedrive-backup
@ -191,22 +185,18 @@ jobs:
service: onedrive
kind: backup
backup-args: '--user "${TEST_USER}"'
restore-args: '--folder Corso_Test_Sanity_${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
test-folder: 'Corso_Test_Sanity_${{ steps.new-data-creation-onedrive.outputs.result }}'
restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }}'
# generate some more entries for incremental check
- name: OneDrive - Create new data (for incremental)
working-directory: ./src/cmd/factory
env:
AZURE_CLIENT_ID: ${{ secrets[needs.SetM365App.outputs.client_id_env] }}
AZURE_CLIENT_SECRET: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }}
AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
run: |
go run . onedrive files \
--user ${TEST_USER} \
--secondaryuser ${{ env.SECONDARY_TEST_USER }} \
--tenant ${{ env.AZURE_TENANT_ID }} \
--destination Corso_Test_Sanity_${{ steps.new-data-creation-onedrive.outputs.result }} \
--secondaryuser ${SECONDARY_TEST_USER} \
--tenant ${AZURE_TENANT_ID} \
--destination ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} \
--count 4
- name: OneDrive - Incremental backup
@ -216,15 +206,29 @@ jobs:
service: onedrive
kind: incremental
backup-args: '--user "${TEST_USER}"'
restore-args: '--folder Corso_Test_Sanity_${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
test-folder: 'Corso_Test_Sanity_${{ steps.new-data-creation-onedrive.outputs.result }}'
restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }}'
##########################################################################################################################################
# Sharepoint
# TODO(keepers): generate new entries for test
# TODO: Add '--restore-permissions' when supported
# generate new entries for test
- name: SharePoint - Create new data
id: new-data-creation-sharepoint
working-directory: ./src/cmd/factory
run: |
suffix=$(date +"%Y-%m-%d_%H-%M-%S")
go run . sharepoint files \
--site ${TEST_SITE} \
--user ${TEST_USER} \
--secondaryuser ${SECONDARY_TEST_USER} \
--tenant ${AZURE_TENANT_ID} \
--destination ${RESTORE_DEST_PFX}$suffix \
--count 4
echo result="${suffix}" >> $GITHUB_OUTPUT
- name: SharePoint - Backup
id: sharepoint-backup
@ -233,10 +237,20 @@ jobs:
service: sharepoint
kind: backup
backup-args: '--site "${TEST_SITE}"'
restore-args: ''
test-folder: ''
restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions'
test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }}'
# TODO(rkeepers): generate some more entries for incremental check
# generate some more entries for incremental check
- name: SharePoint - Create new data (for incremental)
working-directory: ./src/cmd/factory
run: |
go run . sharepoint files \
--site ${TEST_SITE} \
--user ${TEST_USER} \
--secondaryuser ${SECONDARY_TEST_USER} \
--tenant ${AZURE_TENANT_ID} \
--destination ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} \
--count 4
- name: SharePoint - Incremental backup
id: sharepoint-incremental
@ -245,8 +259,8 @@ jobs:
service: sharepoint
kind: incremental
backup-args: '--site "${TEST_SITE}"'
restore-args: ''
test-folder: ''
restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions'
test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }}'
##########################################################################################################################################

View File

@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Released the --mask-sensitive-data flag, which will automatically obscure private data in logs.
- Added `--disable-delta` flag to disable delta based backups for Exchange
- Permission support for SharePoint libraries.
### Fixed
- Graph requests now automatically retry in case of a Bad Gateway or Gateway Timeout.
@ -26,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Known Issues
- Restore operations will merge duplicate Exchange folders at the same hierarchy level into a single folder.
- Sharepoint SiteGroup permissions are not restored.
### Known Issues
- SharePoint document library data can't be restored after the library has been deleted.

View File

@ -33,6 +33,8 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
utils.AddBackupIDFlag(c, true)
utils.AddSharePointDetailsAndRestoreFlags(c)
options.AddRestorePermissionsFlag(c)
options.AddFailFastFlag(c)
}
@ -47,7 +49,11 @@ const (
sharePointServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef in Bob's latest backup (1234abcd...)
corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef
# Restore files named "ServerRenderTemplate.xsl in the folder "Display Templates/Style Sheets".
# Restore the file with ID 98765abcdef along with its associated permissions
corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \
--file 98765abcdef --restore-permissions
# Restore files named "ServerRenderTemplate.xsl" in the folder "Display Templates/Style Sheets".
corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \
--file "ServerRenderTemplate.xsl" --folder "Display Templates/Style Sheets"

View File

@ -43,4 +43,6 @@ var (
PageFolderInput = []string{"pageFolder1", "pageFolder2"}
PageInput = []string{"page1", "page2"}
RestorePermissions = true
)

View File

@ -8,6 +8,7 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cmd/factory/impl"
"github.com/alcionai/corso/src/internal/common/crash"
"github.com/alcionai/corso/src/pkg/logger"
)
@ -29,6 +30,12 @@ var oneDriveCmd = &cobra.Command{
RunE: handleOneDriveFactory,
}
// sharePointCmd is the parent factory command for generating sharepoint
// data.  Actual generation is performed by its subcommands (e.g. "files");
// running it bare reports not-yet-implemented and shows help.
var sharePointCmd = &cobra.Command{
	Use:   "sharepoint",
	Short: "Generate sharepoint data", // fixed typo: was "shareopint"
	RunE:  handleSharePointFactory,
}
// ------------------------------------------------------------------------------------------
// CLI command handlers
// ------------------------------------------------------------------------------------------
@ -37,13 +44,19 @@ func main() {
ctx, _ := logger.SeedLevel(context.Background(), logger.Development)
ctx = SetRootCmd(ctx, factoryCmd)
defer logger.Flush(ctx)
defer func() {
if err := crash.Recovery(ctx, recover(), "backup"); err != nil {
logger.CtxErr(ctx, err).Error("panic in factory")
}
logger.Flush(ctx)
}()
// persistent flags that are common to all use cases
fs := factoryCmd.PersistentFlags()
fs.StringVar(&impl.Tenant, "tenant", "", "m365 tenant containing the user")
fs.StringVar(&impl.Site, "site", "", "sharepoint site owning the new data")
fs.StringVar(&impl.User, "user", "", "m365 user owning the new data")
cobra.CheckErr(factoryCmd.MarkPersistentFlagRequired("user"))
fs.StringVar(&impl.SecondaryUser, "secondaryuser", "", "m365 secondary user owning the new data")
fs.IntVar(&impl.Count, "count", 0, "count of items to produce")
cobra.CheckErr(factoryCmd.MarkPersistentFlagRequired("count"))
@ -54,6 +67,8 @@ func main() {
impl.AddExchangeCommands(exchangeCmd)
factoryCmd.AddCommand(oneDriveCmd)
impl.AddOneDriveCommands(oneDriveCmd)
factoryCmd.AddCommand(sharePointCmd)
impl.AddSharePointCommands(sharePointCmd)
if err := factoryCmd.ExecuteContext(ctx); err != nil {
logger.Flush(ctx)
@ -75,3 +90,8 @@ func handleOneDriveFactory(cmd *cobra.Command, args []string) error {
Err(cmd.Context(), impl.ErrNotYetImplemented)
return cmd.Help()
}
// handleSharePointFactory is the RunE for the bare "sharepoint" command.
// Data generation only happens in subcommands, so this level reports
// not-yet-implemented and falls back to printing the command help.
func handleSharePointFactory(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()
	Err(ctx, impl.ErrNotYetImplemented)

	return cmd.Help()
}

View File

@ -36,6 +36,7 @@ import (
var (
Count int
Destination string
Site string
Tenant string
User string
SecondaryUser string
@ -109,9 +110,10 @@ func generateAndRestoreItems(
// Common Helpers
// ------------------------------------------------------------------------------------------
func getGCAndVerifyUser(
func getGCAndVerifyResourceOwner(
ctx context.Context,
userID string,
resource connector.Resource,
resourceOwner string,
) (
*connector.GraphConnector,
account.Account,
@ -135,15 +137,12 @@ func getGCAndVerifyUser(
return nil, account.Account{}, nil, clues.Wrap(err, "finding m365 account details")
}
gc, err := connector.NewGraphConnector(
ctx,
acct,
connector.Users)
gc, err := connector.NewGraphConnector(ctx, acct, resource)
if err != nil {
return nil, account.Account{}, nil, clues.Wrap(err, "connecting to graph api")
}
id, _, err := gc.PopulateOwnerIDAndNamesFrom(ctx, userID, nil)
id, _, err := gc.PopulateOwnerIDAndNamesFrom(ctx, resourceOwner, nil)
if err != nil {
return nil, account.Account{}, nil, clues.Wrap(err, "verifying user")
}
@ -252,7 +251,7 @@ var (
readPerm = []string{"read"}
)
func generateAndRestoreOnedriveItems(
func generateAndRestoreDriveItems(
gc *connector.GraphConnector,
resourceOwner, secondaryUserID, secondaryUserName string,
acct account.Account,
@ -273,8 +272,24 @@ func generateAndRestoreOnedriveItems(
dest.ContainerName = destFldr
print.Infof(ctx, "Restoring to folder %s", dest.ContainerName)
d, _ := gc.Service.Client().UsersById(resourceOwner).Drive().Get(ctx, nil)
driveID := ptr.Val(d.GetId())
var driveID string
switch service {
case path.SharePointService:
d, err := gc.Service.Client().SitesById(resourceOwner).Drive().Get(ctx, nil)
if err != nil {
return nil, clues.Wrap(err, "getting site's default drive")
}
driveID = ptr.Val(d.GetId())
default:
d, err := gc.Service.Client().UsersById(resourceOwner).Drive().Get(ctx, nil)
if err != nil {
return nil, clues.Wrap(err, "getting user's default drive")
}
driveID = ptr.Val(d.GetId())
}
var (
cols []onedriveColInfo

View File

@ -5,6 +5,7 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/connector"
exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
@ -51,7 +52,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
return nil
}
gc, acct, _, err := getGCAndVerifyUser(ctx, User)
gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
if err != nil {
return Only(ctx, err)
}
@ -98,7 +99,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
return nil
}
gc, acct, _, err := getGCAndVerifyUser(ctx, User)
gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
if err != nil {
return Only(ctx, err)
}
@ -144,7 +145,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
return nil
}
gc, acct, _, err := getGCAndVerifyUser(ctx, User)
gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
if err != nil {
return Only(ctx, err)
}

View File

@ -7,20 +7,21 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/connector"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
)
var filesCmd = &cobra.Command{
var odFilesCmd = &cobra.Command{
Use: "files",
Short: "Generate OneDrive files",
RunE: handleOneDriveFileFactory,
}
func AddOneDriveCommands(cmd *cobra.Command) {
cmd.AddCommand(filesCmd)
cmd.AddCommand(odFilesCmd)
}
func handleOneDriveFileFactory(cmd *cobra.Command, args []string) error {
@ -35,20 +36,23 @@ func handleOneDriveFileFactory(cmd *cobra.Command, args []string) error {
return nil
}
gc, acct, inp, err := getGCAndVerifyUser(ctx, User)
gc, acct, inp, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
if err != nil {
return Only(ctx, err)
}
deets, err := generateAndRestoreOnedriveItems(
sel := selectors.NewOneDriveBackup([]string{User}).Selector
sel.SetDiscreteOwnerIDName(inp.ID(), inp.Name())
deets, err := generateAndRestoreDriveItems(
gc,
User,
inp.ID(),
SecondaryUser,
strings.ToLower(SecondaryUser),
acct,
service,
category,
selectors.NewOneDriveBackup([]string{User}).Selector,
sel,
Tenant,
Destination,
Count,

View File

@ -0,0 +1,71 @@
package impl
import (
"strings"
"github.com/spf13/cobra"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/connector"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
)
// spFilesCmd generates file data in a sharepoint site's document library.
var spFilesCmd = &cobra.Command{
Use: "files",
Short: "Generate SharePoint files",
RunE: handleSharePointLibraryFileFactory,
}
// AddSharePointCommands attaches the sharepoint factory subcommands
// (currently only "files") to the given parent command.
func AddSharePointCommands(cmd *cobra.Command) {
cmd.AddCommand(spFilesCmd)
}
// handleSharePointLibraryFileFactory generates Count files beneath the
// Destination folder in the --site's default document library, then
// restores them through the graph connector.  SecondaryUser is used for
// permission assignment on the generated items.
//
// Returns nil after printing the restored item details; any failure in
// connection, generation, or restore is printed and returned.
func handleSharePointLibraryFileFactory(cmd *cobra.Command, args []string) error {
	var (
		ctx      = cmd.Context()
		service  = path.SharePointService
		category = path.LibrariesCategory
		errs     = fault.New(false)
	)

	if utils.HasNoFlagsAndShownHelp(cmd) {
		return nil
	}

	gc, acct, inp, err := getGCAndVerifyResourceOwner(ctx, connector.Sites, Site)
	if err != nil {
		return Only(ctx, err)
	}

	sel := selectors.NewSharePointBackup([]string{Site}).Selector
	sel.SetDiscreteOwnerIDName(inp.ID(), inp.Name())

	deets, err := generateAndRestoreDriveItems(
		gc,
		inp.ID(),
		SecondaryUser,
		strings.ToLower(SecondaryUser),
		acct,
		service,
		category,
		sel,
		Tenant,
		Destination,
		Count,
		errs)
	if err != nil {
		return Only(ctx, err)
	}

	// Bug fix: log each recovered (non-fatal) error, not the primary err,
	// which is guaranteed nil at this point after the check above.
	for _, e := range errs.Recovered() {
		logger.CtxErr(ctx, e).Error(e.Error())
	}

	deets.PrintEntries(ctx)

	return nil
}

View File

@ -5,7 +5,7 @@ import (
"errors"
"fmt"
"os"
"path"
stdpath "path"
"strings"
"time"
@ -21,6 +21,7 @@ import (
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/filters"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
)
// ---------------------------------------------------------------------------
@ -86,7 +87,7 @@ func main() {
case "onedrive":
checkOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
case "sharepoint":
checkSharePointRestoration(ctx, client, testSite, folder, dataFolder, startTime)
checkSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
default:
fatal(ctx, "no service specified", nil)
}
@ -225,7 +226,7 @@ func getAllMailSubFolders(
childDisplayName = ptr.Val(child.GetDisplayName())
childFolderCount = ptr.Val(child.GetChildFolderCount())
//nolint:forbidigo
fullFolderName = path.Join(parentFolder, childDisplayName)
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
@ -274,7 +275,7 @@ func checkAllSubFolder(
var (
childDisplayName = ptr.Val(child.GetDisplayName())
//nolint:forbidigo
fullFolderName = path.Join(parentFolder, childDisplayName)
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
@ -312,7 +313,7 @@ func checkOneDriveRestoration(
checkDriveRestoration(
ctx,
client,
userID,
path.OneDriveService,
folderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
@ -328,7 +329,7 @@ func checkOneDriveRestoration(
func checkSharePointRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
siteID, folderName, dataFolder string,
siteID, userID, folderName, dataFolder string,
startTime time.Time,
) {
drive, err := client.
@ -342,7 +343,7 @@ func checkSharePointRestoration(
checkDriveRestoration(
ctx,
client,
siteID,
path.SharePointService,
folderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
@ -358,7 +359,7 @@ func checkSharePointRestoration(
func checkDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
resourceOwner,
service path.ServiceType,
folderName,
driveID,
driveName,
@ -428,6 +429,7 @@ func checkDriveRestoration(
checkRestoredDriveItemPermissions(
ctx,
service,
skipPermissionTest,
folderPermissions,
restoredFolderPermissions)
@ -450,6 +452,7 @@ func checkDriveRestoration(
func checkRestoredDriveItemPermissions(
ctx context.Context,
service path.ServiceType,
skip bool,
folderPermissions map[string][]permissionInfo,
restoredFolderPermissions map[string][]permissionInfo,
@ -458,6 +461,11 @@ func checkRestoredDriveItemPermissions(
return
}
/**
TODO: replace this check with testElementsMatch
from internal/connector/graph_connector_helper_test.go
**/
for folderName, permissions := range folderPermissions {
logAndPrint(ctx, "checking for folder: %s", folderName)
@ -468,23 +476,32 @@ func checkRestoredDriveItemPermissions(
continue
}
permCheck := func() bool { return len(permissions) == len(restoreFolderPerm) }
if service == path.SharePointService {
permCheck = func() bool { return len(permissions) <= len(restoreFolderPerm) }
}
assert(
ctx,
func() bool { return len(permissions) == len(restoreFolderPerm) },
permCheck,
fmt.Sprintf("wrong number of restored permissions: %s", folderName),
permissions,
restoreFolderPerm)
for i, perm := range permissions {
// permissions should be sorted, so a by-index comparison works
restored := restoreFolderPerm[i]
for _, perm := range permissions {
eqID := func(pi permissionInfo) bool { return strings.EqualFold(pi.entityID, perm.entityID) }
i := slices.IndexFunc(restoreFolderPerm, eqID)
assert(
ctx,
func() bool { return strings.EqualFold(perm.entityID, restored.entityID) },
fmt.Sprintf("non-matching entity id: %s", folderName),
func() bool { return i >= 0 },
fmt.Sprintf("permission was restored in: %s", folderName),
perm.entityID,
restored.entityID)
restoreFolderPerm)
// permissions should be sorted, so a by-index comparison works
restored := restoreFolderPerm[i]
assert(
ctx,
@ -612,6 +629,7 @@ func permissionIn(
entityID string
)
// TODO: replace with filterUserPermissions in onedrive item.go
if gv2.GetUser() != nil {
entityID = ptr.Val(gv2.GetUser().GetId())
} else if gv2.GetGroup() != nil {

View File

@ -237,7 +237,7 @@ func (gc *GraphConnector) ConsumeRestoreCollections(
case selectors.ServiceOneDrive:
status, err = onedrive.RestoreCollections(ctx, creds, backupVersion, gc.Service, dest, opts, dcs, deets, errs)
case selectors.ServiceSharePoint:
status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets, errs)
status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, opts, dcs, deets, errs)
default:
err = clues.Wrap(clues.New(sels.Service.String()), "service not supported")
}

View File

@ -48,13 +48,16 @@ func testElementsMatch[T any](
t *testing.T,
expected []T,
got []T,
subset bool,
equalityCheck func(expectedItem, gotItem T) bool,
) {
t.Helper()
pending := make([]*T, len(expected))
for i := 0; i < len(expected); i++ {
pending[i] = &expected[i]
for i := range expected {
ei := expected[i]
pending[i] = &ei
}
unexpected := []T{}
@ -97,15 +100,20 @@ func testElementsMatch[T any](
return
}
if subset && len(missing) == 0 && len(unexpected) > 0 {
return
}
assert.Failf(
t,
"contain different elements",
"missing items: (%T)%v\nunexpected items: (%T)%v\n",
"elements differ",
"expected: (%T)%+v\ngot: (%T)%+v\nmissing: %+v\nextra: %+v\n",
expected,
expected,
missing,
got,
unexpected,
)
got,
missing,
unexpected)
}
type configInfo struct {
@ -211,7 +219,7 @@ func checkMessage(
expected models.Messageable,
got models.Messageable,
) {
testElementsMatch(t, expected.GetAttachments(), got.GetAttachments(), attachmentEqual)
testElementsMatch(t, expected.GetAttachments(), got.GetAttachments(), false, attachmentEqual)
assert.Equal(t, expected.GetBccRecipients(), got.GetBccRecipients(), "BccRecipients")
@ -289,7 +297,7 @@ func checkMessage(
assert.Equal(t, ptr.Val(expected.GetSubject()), ptr.Val(got.GetSubject()), "Subject")
testElementsMatch(t, expected.GetToRecipients(), got.GetToRecipients(), recipientEqual)
testElementsMatch(t, expected.GetToRecipients(), got.GetToRecipients(), false, recipientEqual)
// Skip WebLink as it's tied to this specific instance of the item.
@ -535,10 +543,10 @@ func checkEvent(
t,
[]models.Locationable{expected.GetLocation()},
[]models.Locationable{got.GetLocation()},
locationEqual,
)
false,
locationEqual)
testElementsMatch(t, expected.GetLocations(), got.GetLocations(), locationEqual)
testElementsMatch(t, expected.GetLocations(), got.GetLocations(), false, locationEqual)
assert.Equal(t, expected.GetOnlineMeeting(), got.GetOnlineMeeting(), "OnlineMeeting")
@ -726,7 +734,7 @@ func compareDriveItem(
t *testing.T,
expected map[string][]byte,
item data.Stream,
restorePermissions bool,
config configInfo,
rootDir bool,
) bool {
// Skip Drive permissions in the folder that used to be the root. We don't
@ -746,19 +754,15 @@ func compareDriveItem(
isMeta = metadata.HasMetaSuffix(name)
)
if isMeta {
var itemType *metadata.Item
assert.IsType(t, itemType, item)
} else {
if !isMeta {
oitem := item.(*onedrive.Item)
info := oitem.Info()
// Don't need to check SharePoint because it was added after we stopped
// adding meta files to backup details.
if info.OneDrive != nil {
displayName = oitem.Info().OneDrive.ItemName
// Don't need to check SharePoint because it was added after we stopped
// adding meta files to backup details.
assert.False(t, oitem.Info().OneDrive.IsMeta, "meta marker for non meta item %s", name)
} else if info.SharePoint != nil {
displayName = oitem.Info().SharePoint.ItemName
@ -768,6 +772,10 @@ func compareDriveItem(
}
if isMeta {
var itemType *metadata.Item
assert.IsType(t, itemType, item)
var (
itemMeta metadata.Metadata
expectedMeta metadata.Metadata
@ -806,7 +814,7 @@ func compareDriveItem(
assert.Equal(t, expectedMeta.FileName, itemMeta.FileName)
}
if !restorePermissions {
if !config.opts.RestorePermissions {
assert.Equal(t, 0, len(itemMeta.Permissions))
return true
}
@ -824,6 +832,10 @@ func compareDriveItem(
t,
expectedMeta.Permissions,
itemPerms,
// sharepoint retrieves a superset of permissions
// (all site admins, site groups, built in by default)
// relative to the permissions changed by the test.
config.service == path.SharePointService,
permissionEqual)
return true
@ -865,7 +877,7 @@ func compareItem(
service path.ServiceType,
category path.CategoryType,
item data.Stream,
restorePermissions bool,
config configInfo,
rootDir bool,
) bool {
if mt, ok := item.(data.StreamModTime); ok {
@ -886,7 +898,7 @@ func compareItem(
}
case path.OneDriveService:
return compareDriveItem(t, expected, item, restorePermissions, rootDir)
return compareDriveItem(t, expected, item, config, rootDir)
case path.SharePointService:
if category != path.LibrariesCategory {
@ -894,7 +906,7 @@ func compareItem(
}
// SharePoint libraries reuses OneDrive code.
return compareDriveItem(t, expected, item, restorePermissions, rootDir)
return compareDriveItem(t, expected, item, config, rootDir)
default:
assert.FailNowf(t, "unexpected service: %s", service.String())
@ -959,8 +971,7 @@ func checkCollections(
expectedItems int,
expected map[string]map[string][]byte,
got []data.BackupCollection,
dest control.RestoreDestination,
restorePermissions bool,
config configInfo,
) int {
collectionsWithItems := []data.BackupCollection{}
@ -974,7 +985,7 @@ func checkCollections(
category = returned.FullPath().Category()
expectedColData = expected[returned.FullPath().String()]
folders = returned.FullPath().Elements()
rootDir = folders[len(folders)-1] == dest.ContainerName
rootDir = folders[len(folders)-1] == config.dest.ContainerName
)
// Need to iterate through all items even if we don't expect to find a match
@ -1007,7 +1018,7 @@ func checkCollections(
service,
category,
item,
restorePermissions,
config,
rootDir) {
gotItems--
}
@ -1239,7 +1250,7 @@ func collectionsForInfo(
baseDestPath := backupOutputPathFromRestore(t, dest, pth)
baseExpected := expectedData[baseDestPath.String()]
if baseExpected == nil {
if len(baseExpected) == 0 {
expectedData[baseDestPath.String()] = make(map[string][]byte, len(info.items))
baseExpected = expectedData[baseDestPath.String()]
}

View File

@ -286,7 +286,7 @@ type itemData struct {
perms permData
}
type onedriveColInfo struct {
type driveColInfo struct {
pathElements []string
perms permData
files []itemData
@ -296,7 +296,7 @@ type onedriveColInfo struct {
func testDataForInfo(
t *testing.T,
service path.ServiceType,
cols []onedriveColInfo,
cols []driveColInfo,
backupVersion int,
) []colInfo {
var res []colInfo
@ -431,11 +431,6 @@ func (si suiteInfoImpl) Resource() Resource {
// SharePoint shares most of its libraries implementation with OneDrive so we
// only test simple things here and leave the more extensive testing to
// OneDrive.
//
// TODO(ashmrtn): SharePoint doesn't have permissions backup/restore enabled
// right now. Adjust the tests here when that is enabled so we have at least
// basic assurances that it's doing the right thing. We can leave the more
// extensive permissions tests to OneDrive as well.
type GraphConnectorSharePointIntegrationSuite struct {
tester.Suite
@ -486,6 +481,23 @@ func (suite *GraphConnectorSharePointIntegrationSuite) TestRestoreAndBackup_Mult
testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(suite, version.Backup)
}
func (suite *GraphConnectorSharePointIntegrationSuite) TestPermissionsRestoreAndBackup() {
testPermissionsRestoreAndBackup(suite, version.Backup)
}
func (suite *GraphConnectorSharePointIntegrationSuite) TestPermissionsBackupAndNoRestore() {
testPermissionsBackupAndNoRestore(suite, version.Backup)
}
func (suite *GraphConnectorSharePointIntegrationSuite) TestPermissionsInheritanceRestoreAndBackup() {
testPermissionsInheritanceRestoreAndBackup(suite, version.Backup)
}
func (suite *GraphConnectorSharePointIntegrationSuite) TestRestoreFolderNamedFolderRegression() {
// No reason why it couldn't work with previous versions, but this is when it got introduced.
testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID)
}
// ---------------------------------------------------------------------------
// OneDrive most recent backup version
// ---------------------------------------------------------------------------
@ -663,7 +675,7 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(
folderBName,
}
cols := []onedriveColInfo{
cols := []driveColInfo{
{
pathElements: rootPath,
files: []itemData{
@ -807,7 +819,7 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
folderCName,
}
cols := []onedriveColInfo{
cols := []driveColInfo{
{
pathElements: rootPath,
files: []itemData{
@ -939,9 +951,10 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
}
expected := testDataForInfo(suite.T(), suite.BackupService(), cols, version.Backup)
bss := suite.BackupService().String()
for vn := startVersion; vn <= version.Backup; vn++ {
suite.Run(fmt.Sprintf("Version%d", vn), func() {
suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
t := suite.T()
// Ideally this can always be true or false and still
// work, but limiting older versions to use emails so as
@ -984,7 +997,7 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
suite.Service(),
suite.BackupResourceOwner())
inputCols := []onedriveColInfo{
inputCols := []driveColInfo{
{
pathElements: []string{
odConsts.DrivesPathDir,
@ -1005,7 +1018,7 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
},
}
expectedCols := []onedriveColInfo{
expectedCols := []driveColInfo{
{
pathElements: []string{
odConsts.DrivesPathDir,
@ -1023,9 +1036,10 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
}
expected := testDataForInfo(suite.T(), suite.BackupService(), expectedCols, version.Backup)
bss := suite.BackupService().String()
for vn := startVersion; vn <= version.Backup; vn++ {
suite.Run(fmt.Sprintf("Version%d", vn), func() {
suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
t := suite.T()
input := testDataForInfo(t, suite.BackupService(), inputCols, vn)
@ -1150,7 +1164,7 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio
// - inherted-permission-file
// - empty-permission-file (empty/empty might have interesting behavior)
cols := []onedriveColInfo{
cols := []driveColInfo{
{
pathElements: rootPath,
files: []itemData{},
@ -1199,9 +1213,10 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio
}
expected := testDataForInfo(suite.T(), suite.BackupService(), cols, version.Backup)
bss := suite.BackupService().String()
for vn := startVersion; vn <= version.Backup; vn++ {
suite.Run(fmt.Sprintf("Version%d", vn), func() {
suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
t := suite.T()
// Ideally this can always be true or false and still
// work, but limiting older versions to use emails so as
@ -1264,7 +1279,7 @@ func testRestoreFolderNamedFolderRegression(
folderBName,
}
cols := []onedriveColInfo{
cols := []driveColInfo{
{
pathElements: rootPath,
files: []itemData{
@ -1313,9 +1328,10 @@ func testRestoreFolderNamedFolderRegression(
}
expected := testDataForInfo(suite.T(), suite.BackupService(), cols, version.Backup)
bss := suite.BackupService().String()
for vn := startVersion; vn <= version.Backup; vn++ {
suite.Run(fmt.Sprintf("Version%d", vn), func() {
suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
t := suite.T()
input := testDataForInfo(t, suite.BackupService(), cols, vn)

View File

@ -430,8 +430,7 @@ func getCollectionsAndExpected(
owner,
config.dest,
testCollections,
backupVersion,
)
backupVersion)
collections = append(collections, ownerCollections...)
totalItems += numItems
@ -550,8 +549,7 @@ func runBackupAndCompare(
totalKopiaItems,
expectedData,
dcs,
config.dest,
config.opts.RestorePermissions)
config)
status := backupGC.Wait()
@ -1125,17 +1123,15 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
t.Log("Backup enumeration complete")
ci := configInfo{
opts: control.Options{RestorePermissions: true},
// Alright to be empty, needed for OneDrive.
dest: control.RestoreDestination{},
}
// Pull the data prior to waiting for the status as otherwise it will
// deadlock.
skipped := checkCollections(
t,
ctx,
allItems,
allExpectedData,
dcs,
// Alright to be empty, needed for OneDrive.
control.RestoreDestination{},
true)
skipped := checkCollections(t, ctx, allItems, allExpectedData, dcs, ci)
status := backupGC.Wait()
assert.Equal(t, allItems+skipped, status.Objects, "status.Objects")

View File

@ -20,6 +20,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/onedrive/api"
"github.com/alcionai/corso/src/internal/connector/onedrive/api/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
@ -282,6 +283,7 @@ func (suite *OneDriveUnitSuite) TestDrives() {
type OneDriveIntgSuite struct {
tester.Suite
userID string
creds account.M365Config
}
func TestOneDriveSuite(t *testing.T) {
@ -293,7 +295,15 @@ func TestOneDriveSuite(t *testing.T) {
}
func (suite *OneDriveIntgSuite) SetupSuite() {
suite.userID = tester.SecondaryM365UserID(suite.T())
t := suite.T()
suite.userID = tester.SecondaryM365UserID(t)
acct := tester.NewM365Account(t)
creds, err := acct.M365Config()
require.NoError(t, err)
suite.creds = creds
}
func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
@ -334,7 +344,7 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
rootFolder, err := api.GetDriveRoot(ctx, gs, driveID)
require.NoError(t, err, clues.ToCore(err))
restoreFolders := path.Builder{}.Append(folderElements...)
restoreDir := path.Builder{}.Append(folderElements...)
drivePath := path.DrivePath{
DriveID: driveID,
Root: "root:",
@ -344,15 +354,15 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
caches := NewRestoreCaches()
caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId())
folderID, err := CreateRestoreFolders(ctx, gs, &drivePath, restoreFolders, caches)
folderID, err := createRestoreFolders(ctx, gs, &drivePath, restoreDir, caches)
require.NoError(t, err, clues.ToCore(err))
folderIDs = append(folderIDs, folderID)
folderName2 := "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting)
restoreFolders = restoreFolders.Append(folderName2)
restoreDir = restoreDir.Append(folderName2)
folderID, err = CreateRestoreFolders(ctx, gs, &drivePath, restoreFolders, caches)
folderID, err = createRestoreFolders(ctx, gs, &drivePath, restoreDir, caches)
require.NoError(t, err, clues.ToCore(err))
folderIDs = append(folderIDs, folderID)

View File

@ -233,8 +233,7 @@ func filterUserPermissions(ctx context.Context, perms []models.Permissionable) [
continue
}
gv2 := p.GetGrantedToV2()
var (
// Below are the mapping from roles to "Advanced" permissions
// screen entries:
//
@ -242,24 +241,36 @@ func filterUserPermissions(ctx context.Context, perms []models.Permissionable) [
// write - Design | Edit | Contribute (no difference in /permissions api)
// read - Read
// empty - Restricted View
roles := p.GetRoles()
entityID := ""
if gv2.GetUser() != nil {
entityID = ptr.Val(gv2.GetUser().GetId())
} else if gv2.GetGroup() != nil {
entityID = ptr.Val(gv2.GetGroup().GetId())
} else {
// TODO Add application permissions when adding permissions for SharePoint
//
// helpful docs:
// https://devblogs.microsoft.com/microsoft365dev/controlling-app-access-on-specific-sharepoint-site-collections/
logm := logger.Ctx(ctx)
if gv2.GetApplication() != nil {
logm.With("application_id", ptr.Val(gv2.GetApplication().GetId()))
}
if gv2.GetDevice() != nil {
logm.With("device_id", ptr.Val(gv2.GetDevice().GetId()))
}
logm.Info("untracked permission")
roles = p.GetRoles()
gv2 = p.GetGrantedToV2()
entityID string
gv2t metadata.GV2Type
)
switch true {
case gv2.GetUser() != nil:
gv2t = metadata.GV2User
entityID = ptr.Val(gv2.GetUser().GetId())
case gv2.GetSiteUser() != nil:
gv2t = metadata.GV2SiteUser
entityID = ptr.Val(gv2.GetSiteUser().GetId())
case gv2.GetGroup() != nil:
gv2t = metadata.GV2Group
entityID = ptr.Val(gv2.GetGroup().GetId())
case gv2.GetSiteGroup() != nil:
gv2t = metadata.GV2SiteGroup
entityID = ptr.Val(gv2.GetSiteGroup().GetId())
case gv2.GetApplication() != nil:
gv2t = metadata.GV2App
entityID = ptr.Val(gv2.GetApplication().GetId())
case gv2.GetDevice() != nil:
gv2t = metadata.GV2Device
entityID = ptr.Val(gv2.GetDevice().GetId())
default:
logger.Ctx(ctx).Info("untracked permission")
}
// Technically GrantedToV2 can also contain devices, but the
@ -273,6 +284,7 @@ func filterUserPermissions(ctx context.Context, perms []models.Permissionable) [
ID: ptr.Val(p.GetId()),
Roles: roles,
EntityID: entityID,
EntityType: gv2t,
Expiration: p.GetExpirationDateTime(),
})
}

View File

@ -243,36 +243,56 @@ func (suite *ItemIntegrationSuite) TestDriveGetFolder() {
}
}
func getPermsUperms(permID, userID, entity string, scopes []string) (models.Permissionable, metadata.Permission) {
func getPermsAndResourceOwnerPerms(
permID, resourceOwner string,
gv2t metadata.GV2Type,
scopes []string,
) (models.Permissionable, metadata.Permission) {
sharepointIdentitySet := models.NewSharePointIdentitySet()
switch gv2t {
case metadata.GV2App, metadata.GV2Device, metadata.GV2Group, metadata.GV2User:
identity := models.NewIdentity()
identity.SetId(&userID)
identity.SetAdditionalData(map[string]any{"email": &userID})
identity.SetId(&resourceOwner)
identity.SetAdditionalData(map[string]any{"email": &resourceOwner})
sharepointIdentity := models.NewSharePointIdentitySet()
switch gv2t {
case metadata.GV2User:
sharepointIdentitySet.SetUser(identity)
case metadata.GV2Group:
sharepointIdentitySet.SetGroup(identity)
case metadata.GV2App:
sharepointIdentitySet.SetApplication(identity)
case metadata.GV2Device:
sharepointIdentitySet.SetDevice(identity)
}
switch entity {
case "user":
sharepointIdentity.SetUser(identity)
case "group":
sharepointIdentity.SetGroup(identity)
case "application":
sharepointIdentity.SetApplication(identity)
case "device":
sharepointIdentity.SetDevice(identity)
case metadata.GV2SiteUser, metadata.GV2SiteGroup:
spIdentity := models.NewSharePointIdentity()
spIdentity.SetId(&resourceOwner)
spIdentity.SetAdditionalData(map[string]any{"email": &resourceOwner})
switch gv2t {
case metadata.GV2SiteUser:
sharepointIdentitySet.SetSiteUser(spIdentity)
case metadata.GV2SiteGroup:
sharepointIdentitySet.SetSiteGroup(spIdentity)
}
}
perm := models.NewPermission()
perm.SetId(&permID)
perm.SetRoles([]string{"read"})
perm.SetGrantedToV2(sharepointIdentity)
perm.SetGrantedToV2(sharepointIdentitySet)
uperm := metadata.Permission{
ownersPerm := metadata.Permission{
ID: permID,
Roles: []string{"read"},
EntityID: userID,
EntityID: resourceOwner,
EntityType: gv2t,
}
return perm, uperm
return perm, ownersPerm
}
type ItemUnitTestSuite struct {
@ -284,18 +304,28 @@ func TestItemUnitTestSuite(t *testing.T) {
}
func (suite *ItemUnitTestSuite) TestDrivePermissionsFilter() {
permID := "fakePermId"
userID := "fakeuser@provider.com"
userID2 := "fakeuser2@provider.com"
var (
pID = "fakePermId"
uID = "fakeuser@provider.com"
uID2 = "fakeuser2@provider.com"
own = []string{"owner"}
r = []string{"read"}
rw = []string{"read", "write"}
)
userOwnerPerm, userOwnerUperm := getPermsUperms(permID, userID, "user", []string{"owner"})
userReadPerm, userReadUperm := getPermsUperms(permID, userID, "user", []string{"read"})
userReadWritePerm, userReadWriteUperm := getPermsUperms(permID, userID2, "user", []string{"read", "write"})
userOwnerPerm, userOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2User, own)
userReadPerm, userReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2User, r)
userReadWritePerm, userReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2User, rw)
siteUserOwnerPerm, siteUserOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteUser, own)
siteUserReadPerm, siteUserReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteUser, r)
siteUserReadWritePerm, siteUserReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2SiteUser, rw)
groupReadPerm, groupReadUperm := getPermsUperms(permID, userID, "group", []string{"read"})
groupReadWritePerm, groupReadWriteUperm := getPermsUperms(permID, userID2, "group", []string{"read", "write"})
groupReadPerm, groupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2Group, r)
groupReadWritePerm, groupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2Group, rw)
siteGroupReadPerm, siteGroupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteGroup, r)
siteGroupReadWritePerm, siteGroupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2SiteGroup, rw)
noPerm, _ := getPermsUperms(permID, userID, "user", []string{"read"})
noPerm, _ := getPermsAndResourceOwnerPerms(pID, uID, "user", []string{"read"})
noPerm.SetGrantedToV2(nil) // eg: link shares
cases := []struct {
@ -318,39 +348,78 @@ func (suite *ItemUnitTestSuite) TestDrivePermissionsFilter() {
{
name: "user with read permissions",
graphPermissions: []models.Permissionable{userReadPerm},
parsedPermissions: []metadata.Permission{userReadUperm},
parsedPermissions: []metadata.Permission{userReadROperm},
},
{
name: "user with owner permissions",
graphPermissions: []models.Permissionable{userOwnerPerm},
parsedPermissions: []metadata.Permission{userOwnerUperm},
parsedPermissions: []metadata.Permission{userOwnerROperm},
},
{
name: "user with read and write permissions",
graphPermissions: []models.Permissionable{userReadWritePerm},
parsedPermissions: []metadata.Permission{userReadWriteUperm},
parsedPermissions: []metadata.Permission{userReadWriteROperm},
},
{
name: "multiple users with separate permissions",
graphPermissions: []models.Permissionable{userReadPerm, userReadWritePerm},
parsedPermissions: []metadata.Permission{userReadUperm, userReadWriteUperm},
parsedPermissions: []metadata.Permission{userReadROperm, userReadWriteROperm},
},
// site-user
{
name: "site user with read permissions",
graphPermissions: []models.Permissionable{siteUserReadPerm},
parsedPermissions: []metadata.Permission{siteUserReadROperm},
},
{
name: "site user with owner permissions",
graphPermissions: []models.Permissionable{siteUserOwnerPerm},
parsedPermissions: []metadata.Permission{siteUserOwnerROperm},
},
{
name: "site user with read and write permissions",
graphPermissions: []models.Permissionable{siteUserReadWritePerm},
parsedPermissions: []metadata.Permission{siteUserReadWriteROperm},
},
{
name: "multiple site users with separate permissions",
graphPermissions: []models.Permissionable{siteUserReadPerm, siteUserReadWritePerm},
parsedPermissions: []metadata.Permission{siteUserReadROperm, siteUserReadWriteROperm},
},
// group
{
name: "group with read permissions",
graphPermissions: []models.Permissionable{groupReadPerm},
parsedPermissions: []metadata.Permission{groupReadUperm},
parsedPermissions: []metadata.Permission{groupReadROperm},
},
{
name: "group with read and write permissions",
graphPermissions: []models.Permissionable{groupReadWritePerm},
parsedPermissions: []metadata.Permission{groupReadWriteUperm},
parsedPermissions: []metadata.Permission{groupReadWriteROperm},
},
{
name: "multiple groups with separate permissions",
graphPermissions: []models.Permissionable{groupReadPerm, groupReadWritePerm},
parsedPermissions: []metadata.Permission{groupReadUperm, groupReadWriteUperm},
parsedPermissions: []metadata.Permission{groupReadROperm, groupReadWriteROperm},
},
// site-group
{
name: "site group with read permissions",
graphPermissions: []models.Permissionable{siteGroupReadPerm},
parsedPermissions: []metadata.Permission{siteGroupReadROperm},
},
{
name: "site group with read and write permissions",
graphPermissions: []models.Permissionable{siteGroupReadWritePerm},
parsedPermissions: []metadata.Permission{siteGroupReadWriteROperm},
},
{
name: "multiple site groups with separate permissions",
graphPermissions: []models.Permissionable{siteGroupReadPerm, siteGroupReadWritePerm},
parsedPermissions: []metadata.Permission{siteGroupReadROperm, siteGroupReadWriteROperm},
},
}
for _, tc := range cases {

View File

@ -13,6 +13,17 @@ const (
SharingModeInherited
)
type GV2Type string
const (
GV2App GV2Type = "application"
GV2Device GV2Type = "device"
GV2Group GV2Type = "group"
GV2SiteUser GV2Type = "site_user"
GV2SiteGroup GV2Type = "site_group"
GV2User GV2Type = "user"
)
// FilePermission is used to store permissions of a specific resource owner
// to a drive item.
type Permission struct {
@ -20,6 +31,7 @@ type Permission struct {
Roles []string `json:"role,omitempty"`
Email string `json:"email,omitempty"` // DEPRECATED: Replaced with EntityID in newer backups
EntityID string `json:"entityId,omitempty"` // this is the resource owner's ID
EntityType GV2Type `json:"entityType,omitempty"`
Expiration *time.Time `json:"expiration,omitempty"`
}

View File

@ -2,6 +2,7 @@ package onedrive
import (
"context"
"fmt"
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/drive"
@ -52,7 +53,7 @@ func getCollectionMetadata(
var (
err error
collectionPath = dc.FullPath()
fullPath = dc.FullPath()
)
if len(drivePath.Folders) == 0 {
@ -61,7 +62,7 @@ func getCollectionMetadata(
}
if backupVersion < version.OneDrive4DirIncludesPermissions {
colMeta, err := getParentMetadata(collectionPath, caches.ParentDirToMeta)
colMeta, err := getParentMetadata(fullPath, caches.ParentDirToMeta)
if err != nil {
return metadata.Metadata{}, clues.Wrap(err, "collection metadata")
}
@ -69,8 +70,7 @@ func getCollectionMetadata(
return colMeta, nil
}
// Root folder doesn't have a metadata file associated with it.
folders := collectionPath.Folders()
folders := fullPath.Folders()
metaName := folders[len(folders)-1] + metadata.DirMetaFileSuffix
if backupVersion >= version.OneDrive5DirMetaNoName {
@ -90,6 +90,7 @@ func getCollectionMetadata(
// permissions. parentMetas is expected to have all the parent
// directory metas for this to work.
func computeParentPermissions(
ctx context.Context,
originDir path.Path,
// map parent dir -> parent's metadata
parentMetas map[string]metadata.Metadata,
@ -107,12 +108,16 @@ func computeParentPermissions(
for {
parent, err = parent.Dir()
if err != nil {
return metadata.Metadata{}, clues.New("getting parent")
return metadata.Metadata{}, clues.New("getting parent").WithClues(ctx)
}
fmt.Println("pd", parent)
ictx := clues.Add(ctx, "parent_dir", parent)
drivePath, err := path.ToDrivePath(parent)
if err != nil {
return metadata.Metadata{}, clues.New("transforming dir to drivePath")
return metadata.Metadata{}, clues.New("transforming dir to drivePath").WithClues(ictx)
}
if len(drivePath.Folders) == 0 {
@ -121,7 +126,7 @@ func computeParentPermissions(
meta, ok = parentMetas[parent.String()]
if !ok {
return metadata.Metadata{}, clues.New("no parent meta")
return metadata.Metadata{}, clues.New("no metadata found for parent folder: " + parent.String()).WithClues(ictx)
}
if meta.SharingMode == metadata.SharingModeCustom {
@ -144,13 +149,18 @@ func UpdatePermissions(
// The ordering of the operations is important here. We first
// remove all the removed permissions and then add the added ones.
for _, p := range permRemoved {
ictx := clues.Add(
ctx,
"permission_entity_type", p.EntityType,
"permission_entity_id", clues.Hide(p.EntityID))
// deletes require unique http clients
// https://github.com/alcionai/corso/issues/2707
// this is bad citizenship, and could end up consuming a lot of
// system resources if servicers leak client connections (sockets, etc).
a, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret)
if err != nil {
return graph.Wrap(ctx, err, "creating delete client")
return graph.Wrap(ictx, err, "creating delete client")
}
pid, ok := oldPermIDToNewID[p.ID]
@ -163,13 +173,18 @@ func UpdatePermissions(
DrivesById(driveID).
ItemsById(itemID).
PermissionsById(pid).
Delete(graph.ConsumeNTokens(ctx, graph.PermissionsLC), nil)
Delete(graph.ConsumeNTokens(ictx, graph.PermissionsLC), nil)
if err != nil {
return graph.Wrap(ctx, err, "removing permissions")
return graph.Wrap(ictx, err, "removing permissions")
}
}
for _, p := range permAdded {
ictx := clues.Add(
ctx,
"permission_entity_type", p.EntityType,
"permission_entity_id", clues.Hide(p.EntityID))
// We are not able to restore permissions when there are no
// roles or for owner, this seems to be restriction in graph
roles := []string{}
@ -180,7 +195,9 @@ func UpdatePermissions(
}
}
if len(roles) == 0 {
// TODO: sitegroup support. Currently errors with "One or more users could not be resolved",
// likely due to the site group entityID consisting of a single integer (ex: 4)
if len(roles) == 0 || p.EntityType == metadata.GV2SiteGroup {
continue
}
@ -192,14 +209,11 @@ func UpdatePermissions(
pbody.SetExpirationDateTime(&expiry)
}
si := false
pbody.SetSendInvitation(&si)
rs := true
pbody.SetRequireSignIn(&rs)
pbody.SetSendInvitation(ptr.To(false))
pbody.SetRequireSignIn(ptr.To(true))
rec := models.NewDriveRecipient()
if p.EntityID != "" {
if len(p.EntityID) > 0 {
rec.SetObjectId(&p.EntityID)
} else {
// Previous versions used to only store email for a
@ -209,7 +223,7 @@ func UpdatePermissions(
pbody.SetRecipients([]models.DriveRecipientable{rec})
newPerm, err := api.PostItemPermissionUpdate(ctx, service, driveID, itemID, pbody)
newPerm, err := api.PostItemPermissionUpdate(ictx, service, driveID, itemID, pbody)
if err != nil {
return clues.Stack(err)
}
@ -240,9 +254,9 @@ func RestorePermissions(
ctx = clues.Add(ctx, "permission_item_id", itemID)
parents, err := computeParentPermissions(itemPath, caches.ParentDirToMeta)
parents, err := computeParentPermissions(ctx, itemPath, caches.ParentDirToMeta)
if err != nil {
return clues.Wrap(err, "parent permissions").WithClues(ctx)
return clues.Wrap(err, "parent permissions")
}
permAdded, permRemoved := metadata.DiffPermissions(parents.Permissions, current.Permissions)

View File

@ -22,10 +22,14 @@ func TestPermissionsUnitTestSuite(t *testing.T) {
suite.Run(t, &PermissionsUnitTestSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions() {
func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions_oneDrive() {
runComputeParentPermissionsTest(suite, path.OneDriveService, path.FilesCategory, "user")
}
func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions_sharePoint() {
runComputeParentPermissionsTest(suite, path.SharePointService, path.LibrariesCategory, "site")
}
func runComputeParentPermissionsTest(
suite *PermissionsUnitTestSuite,
service path.ServiceType,
@ -147,12 +151,12 @@ func runComputeParentPermissionsTest(
for _, test := range table {
suite.Run(test.name, func() {
_, flush := tester.NewContext()
ctx, flush := tester.NewContext()
defer flush()
t := suite.T()
m, err := computeParentPermissions(test.item, test.parentPerms)
m, err := computeParentPermissions(ctx, test.item, test.parentPerms)
require.NoError(t, err, "compute permissions")
assert.Equal(t, m, test.meta)

View File

@ -73,10 +73,8 @@ func RestoreCollections(
"destination", dest.ContainerName)
// Reorder collections so that the parents directories are created
// before the child directories
sort.Slice(dcs, func(i, j int) bool {
return dcs[i].FullPath().String() < dcs[j].FullPath().String()
})
// before the child directories; a requirement for permissions.
data.SortRestoreCollections(dcs)
// Iterate through the data collections and restore the contents of each
for _, dc := range dcs {
@ -89,9 +87,10 @@ func RestoreCollections(
metrics support.CollectionMetrics
ictx = clues.Add(
ctx,
"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()),
"category", dc.FullPath().Category(),
"path", dc.FullPath())
"destination", clues.Hide(dest.ContainerName),
"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()),
"full_path", dc.FullPath())
)
metrics, err = RestoreCollection(
@ -152,7 +151,7 @@ func RestoreCollection(
el = errs.Local()
)
ctx, end := diagnostics.Span(ctx, "gc:oneDrive:restoreCollection", diagnostics.Label("path", directory))
ctx, end := diagnostics.Span(ctx, "gc:drive:restoreCollection", diagnostics.Label("path", directory))
defer end()
drivePath, err := path.ToDrivePath(directory)
@ -173,12 +172,12 @@ func RestoreCollection(
// from the backup under this the restore folder instead of root)
// i.e. Restore into `<restoreContainerName>/<original folder path>`
// the drive into which this folder gets restored is tracked separately in drivePath.
restoreFolderElements := path.Builder{}.Append(restoreContainerName).Append(drivePath.Folders...)
restoreDir := path.Builder{}.Append(restoreContainerName).Append(drivePath.Folders...)
ctx = clues.Add(
ctx,
"directory", dc.FullPath().Folder(false),
"destination_elements", restoreFolderElements,
"restore_destination", restoreDir,
"drive_id", drivePath.DriveID)
trace.Log(ctx, "gc:oneDrive:restoreCollection", directory.String())
@ -196,12 +195,12 @@ func RestoreCollection(
}
// Create restore folders and get the folder ID of the folder the data stream will be restored in
restoreFolderID, err := createRestoreFoldersWithPermissions(
restoreFolderID, err := CreateRestoreFolders(
ctx,
creds,
service,
drivePath,
restoreFolderElements,
restoreDir,
dc.FullPath(),
colMeta,
caches,
@ -301,6 +300,7 @@ func restoreItem(
itemPath path.Path,
) (details.ItemInfo, bool, error) {
itemUUID := itemData.UUID()
ctx = clues.Add(ctx, "item_id", itemUUID)
if backupVersion < version.OneDrive1DataAndMetaFiles {
itemInfo, err := restoreV0File(
@ -557,27 +557,27 @@ func restoreV6File(
return itemInfo, nil
}
// createRestoreFoldersWithPermissions creates the restore folder hierarchy in
// CreateRestoreFolders creates the restore folder hierarchy in
// the specified drive and returns the folder ID of the last folder entry in the
// hierarchy. Permissions are only applied to the last folder in the hierarchy.
// Passing nil for the permissions results in just creating the folder(s).
// folderCache is mutated, as a side effect of populating the items.
func createRestoreFoldersWithPermissions(
func CreateRestoreFolders(
ctx context.Context,
creds account.M365Config,
service graph.Servicer,
drivePath *path.DrivePath,
restoreFolders *path.Builder,
restoreDir *path.Builder,
folderPath path.Path,
folderMetadata metadata.Metadata,
caches *restoreCaches,
restorePerms bool,
) (string, error) {
id, err := CreateRestoreFolders(
id, err := createRestoreFolders(
ctx,
service,
drivePath,
restoreFolders,
restoreDir,
caches)
if err != nil {
return "", err
@ -605,10 +605,10 @@ func createRestoreFoldersWithPermissions(
return id, err
}
// CreateRestoreFolders creates the restore folder hierarchy in the specified
// createRestoreFolders creates the restore folder hierarchy in the specified
// drive and returns the folder ID of the last folder entry in the hierarchy.
// folderCache is mutated, as a side effect of populating the items.
func CreateRestoreFolders(
func createRestoreFolders(
ctx context.Context,
service graph.Servicer,
drivePath *path.DrivePath,
@ -735,10 +735,11 @@ func fetchAndReadMetadata(
fetcher fileFetcher,
metaName string,
) (metadata.Metadata, error) {
ctx = clues.Add(ctx, "meta_file_name", metaName)
metaFile, err := fetcher.Fetch(ctx, metaName)
if err != nil {
err = clues.Wrap(err, "getting item metadata").With("meta_file_name", metaName)
return metadata.Metadata{}, err
return metadata.Metadata{}, clues.Wrap(err, "getting item metadata")
}
metaReader := metaFile.ToReader()
@ -746,8 +747,7 @@ func fetchAndReadMetadata(
meta, err := getMetadata(metaReader)
if err != nil {
err = clues.Wrap(err, "deserializing item metadata").With("meta_file_name", metaName)
return metadata.Metadata{}, err
return metadata.Metadata{}, clues.Wrap(err, "deserializing item metadata")
}
return meta, nil

View File

@ -46,6 +46,7 @@ func RestoreCollections(
creds account.M365Config,
service graph.Servicer,
dest control.RestoreDestination,
opts control.Options,
dcs []data.RestoreCollection,
deets *details.Builder,
errs *fault.Bus,
@ -56,6 +57,10 @@ func RestoreCollections(
el = errs.Local()
)
// Reorder collections so that the parents directories are created
// before the child directories; a requirement for permissions.
data.SortRestoreCollections(dcs)
// Iterate through the data collections and restore the contents of each
for _, dc := range dcs {
if el.Failure() != nil {
@ -69,7 +74,8 @@ func RestoreCollections(
ictx = clues.Add(ctx,
"category", category,
"destination", clues.Hide(dest.ContainerName),
"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()))
"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()),
"full_path", dc.FullPath())
)
switch dc.FullPath().Category() {
@ -84,7 +90,7 @@ func RestoreCollections(
onedrive.SharePointSource,
dest.ContainerName,
deets,
false,
opts.RestorePermissions,
errs)
case path.ListsCategory:

View File

@ -0,0 +1,10 @@
package data
import "sort"
// SortRestoreCollections performs an in-place sort on the provided collection.
func SortRestoreCollections(rcs []RestoreCollection) {
sort.Slice(rcs, func(i, j int) bool {
return rcs[i].FullPath().String() < rcs[j].FullPath().String()
})
}

View File

@ -389,13 +389,16 @@ func generateContainerOfItems(
dest,
collections)
opts := control.Defaults()
opts.RestorePermissions = true
deets, err := gc.ConsumeRestoreCollections(
ctx,
backupVersion,
acct,
sel,
dest,
control.Options{RestorePermissions: true},
opts,
dataColls,
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
@ -1537,7 +1540,6 @@ func runDriveIncrementalTest(
updateFiles func(t *testing.T)
itemsRead int
itemsWritten int
skip bool
}{
{
name: "clean incremental, no changes",
@ -1569,7 +1571,6 @@ func runDriveIncrementalTest(
},
{
name: "add permission to new file",
skip: skipPermissionsTests,
updateFiles: func(t *testing.T) {
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
@ -1592,7 +1593,6 @@ func runDriveIncrementalTest(
},
{
name: "remove permission from new file",
skip: skipPermissionsTests,
updateFiles: func(t *testing.T) {
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
@ -1614,7 +1614,6 @@ func runDriveIncrementalTest(
},
{
name: "add permission to container",
skip: skipPermissionsTests,
updateFiles: func(t *testing.T) {
targetContainer := containerIDs[container1]
driveItem := models.NewDriveItem()
@ -1637,7 +1636,6 @@ func runDriveIncrementalTest(
},
{
name: "remove permission from container",
skip: skipPermissionsTests,
updateFiles: func(t *testing.T) {
targetContainer := containerIDs[container1]
driveItem := models.NewDriveItem()
@ -1849,10 +1847,6 @@ func runDriveIncrementalTest(
}
for _, test := range table {
suite.Run(test.name, func() {
if test.skip {
suite.T().Skip("flagged to skip")
}
cleanGC, err := connector.NewGraphConnector(ctx, acct, resource)
require.NoError(t, err, clues.ToCore(err))
@ -1877,8 +1871,23 @@ func runDriveIncrementalTest(
// do some additional checks to ensure the incremental dealt with fewer items.
// +2 on read/writes to account for metadata: 1 delta and 1 path.
assert.Equal(t, test.itemsWritten+2, incBO.Results.ItemsWritten, "incremental items written")
assert.Equal(t, test.itemsRead+2, incBO.Results.ItemsRead, "incremental items read")
var (
expectWrites = test.itemsWritten + 2
expectReads = test.itemsRead + 2
assertReadWrite = assert.Equal
)
// Sharepoint can produce a superset of permissions by nature of
// its drive type. Since this counter comparison is a bit hacky
// to begin with, it's easiest to assert a <= comparison instead
// of fine tuning each test case.
if service == path.SharePointService {
assertReadWrite = assert.LessOrEqual
}
assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written")
assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")

View File

@ -59,18 +59,18 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) {
}
if fe.Failure != nil {
log.With("error", fe.Failure).Error(pfxMsg + " primary failure")
log.With("error", fe.Failure).Errorf("%s primary failure: %s", pfxMsg, fe.Failure.Msg)
}
for i, item := range fe.Items {
log.With("failed_item", item).Errorf("%s item failure %d of %d", pfxMsg, i+1, li)
log.With("failed_item", item).Errorf("%s item failure %d of %d: %s", pfxMsg, i+1, li, item.Cause)
}
for i, item := range fe.Skipped {
log.With("skipped_item", item).Errorf("%s skipped item %d of %d", pfxMsg, i+1, ls)
log.With("skipped_item", item).Errorf("%s skipped item %d of %d: %s", pfxMsg, i+1, ls, item.Item.Cause)
}
for i, err := range fe.Recovered {
log.With("recovered_error", err).Errorf("%s recoverable error %d of %d", pfxMsg, i+1, lr)
log.With("recovered_error", err).Errorf("%s recoverable error %d of %d: %s", pfxMsg, i+1, lr, err.Msg)
}
}

View File

@ -360,7 +360,7 @@ func formatDetailsForRestoration(
return nil, clues.Wrap(err, "getting restore paths")
}
if sel.Service == selectors.ServiceOneDrive {
if sel.Service == selectors.ServiceOneDrive || sel.Service == selectors.ServiceSharePoint {
paths, err = onedrive.AugmentRestorePaths(backupVersion, paths)
if err != nil {
return nil, clues.Wrap(err, "augmenting paths")

View File

@ -226,11 +226,11 @@ func (dm DetailsModel) FilterMetaFiles() DetailsModel {
}
// Check if a file is a metadata file. These are used to store
// additional data like permissions in case of OneDrive and are not to
// be treated as regular files.
// additional data like permissions (in case of Drive items) and are
// not to be treated as regular files.
func (de Entry) isMetaFile() bool {
// TODO: Add meta file filtering to SharePoint as well once we add
// meta files for SharePoint.
// sharepoint types not needed, since sharepoint permissions were
// added after IsMeta was deprecated.
return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta
}