Enhancement Sanity Test: verify permissions and defined data (#2938)

<!-- PR description-->
OneDrive:
- verify permissions on restored items: the grantedToV2 entity and its roles (see the sketch after this list)
- recursively check each subfolder's data and permissions
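
As a rough illustration of that permission check, here is a minimal, hypothetical sketch (not the PR's code) of comparing a folder's permission entries, keyed by grantedToV2 entity ID with sorted roles, against the restored folder's. The `permissionInfo` shape mirrors the struct added in this PR; `samePermissions` and the sample data are made up:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/exp/slices"
)

// permissionInfo mirrors the struct added by this PR: the grantedToV2
// entity (user or group id) plus its sorted role set.
type permissionInfo struct {
	entityID string
	roles    []string
}

// samePermissions is a hypothetical helper: both the entity and its
// sorted roles must match, entry by entry.
func samePermissions(source, restored []permissionInfo) bool {
	if len(source) != len(restored) {
		return false
	}

	for i, src := range source {
		rst := restored[i]
		if src.entityID != rst.entityID || !slices.Equal(src.roles, rst.roles) {
			return false
		}
	}

	return true
}

func main() {
	// hypothetical sample data
	src := []permissionInfo{{entityID: "user-1", roles: []string{"read", "write"}}}
	rst := []permissionInfo{{entityID: "user-1", roles: []string{"read", "write"}}}

	if !samePermissions(src, rst) {
		fmt.Println("test failure: permissions do not match")
		os.Exit(1)
	}

	fmt.Println("permissions match")
}
```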

Exchange:
- the existing mailbox data is huge, so create a small set of custom data and verify that exactly that data is restored correctly (see the sketch below)
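
A minimal, hypothetical sketch (not the PR's code) of the Exchange-side idea: record a per-folder message count for the seeded data folder, then compare it against the counts found under the restored folder. `verifyCounts` and the folder names/counts are made up:

```go
package main

import (
	"fmt"
	"os"
)

// verifyCounts is a hypothetical helper: every folder seeded into the
// mailbox must reappear under the restore folder with the same item count.
func verifyCounts(expected, restored map[string]int32) {
	for folder, want := range expected {
		if got := restored[folder]; got != want {
			fmt.Println("test failure: restore item counts do not match")
			fmt.Println("* folder:  ", folder)
			fmt.Println("* expected:", want)
			fmt.Println("* actual:  ", got)
			os.Exit(1)
		}
	}

	fmt.Println("Success")
}

func main() {
	// hypothetical counts for the seeded folder and one subfolder
	expected := map[string]int32{"Corso_Restore_st_sample": 4, "Corso_Restore_st_sample/sub": 2}
	restored := map[string]int32{"Corso_Restore_st_sample": 4, "Corso_Restore_st_sample/sub": 2}

	verifyCounts(expected, restored)
}
```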
 
#### Does this PR need a docs update or release note?

- [ ]  Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x]  No

#### Type of change

<!--- Please check the type of change your PR introduces: --->
- [ ] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [x] 🤖 Supportability/Tests
- [x] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

<!-- Can reference multiple issues. Use one of the following "magic words" - "closes, fixes" to auto-close the Github issue. -->
* #<issue>

#### Test Plan

<!-- How will this be tested prior to merging.-->
- [x] 💪 Manual
- [ ]  Unit test
- [ ] 💚 E2E
neha_gupta 2023-04-03 16:13:48 +05:30 committed by GitHub
parent 52de2a32e8
commit 4087b69a39
4 changed files with 323 additions and 149 deletions

View File

@@ -97,15 +97,32 @@ jobs:
             exit 1
           fi

+      # generate new entries to roll into the next load test
+      # only runs if the test was successful
+      - name: New Data Creation
+        working-directory: ./src/cmd/factory
+        env:
+          AZURE_CLIENT_ID: ${{ secrets.CLIENT_ID }}
+          AZURE_CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }}
+          AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
+          CORSO_M365_LOAD_TEST_USER_ID: ${{ secrets.CORSO_M365_LOAD_TEST_USER_ID }}
+        run: |
+          go run . exchange emails \
+            --user ${{ env.CORSO_M365_TEST_USER_ID }} \
+            --tenant ${{ env.AZURE_TENANT_ID }} \
+            --destination Corso_Restore_st_${{ steps.repo-init.outputs.result }} \
+            --count 4
+
       # run the tests
       - name: Backup exchange test
         id: exchange-test
         run: |
           ./corso backup create exchange \
             --user "${CORSO_M365_TEST_USER_ID}" \
             --hide-progress \
+            --data 'email' \
             --json \
             2>&1 | tee $TEST_RESULT/backup_exchange.txt

           resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_exchange.txt )
@@ -152,6 +169,7 @@ jobs:
         run: |
           set -euo pipefail
           ./corso restore exchange \
+            --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \
             --hide-progress \
             --backup "${{ steps.exchange-test.outputs.result }}" \
             2>&1 | tee $TEST_RESULT/exchange-restore-test.txt
@@ -161,6 +179,7 @@ jobs:
         env:
           SANITY_RESTORE_FOLDER: ${{ steps.exchange-restore-test.outputs.result }}
           SANITY_RESTORE_SERVICE: "exchange"
+          TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }}
         run: |
           set -euo pipefail
           ./sanityCheck
@@ -193,6 +212,7 @@ jobs:
           ./corso restore exchange \
             --hide-progress \
             --backup "${{ steps.exchange-incremental-test.outputs.result }}" \
+            --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \
             2>&1 | tee $TEST_RESULT/exchange-incremantal-restore-test.txt

           echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/exchange-incremantal-restore-test.txt | sed "s/Restoring to folder//" ) >> $GITHUB_OUTPUT
@@ -200,6 +220,8 @@ jobs:
         env:
           SANITY_RESTORE_FOLDER: ${{ steps.exchange-incremantal-restore-test.outputs.result }}
           SANITY_RESTORE_SERVICE: "exchange"
+          TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }}
+          BASE_BACKUP: ${{ steps.exchange-restore-test.outputs.result }}
         run: |
           set -euo pipefail
           ./sanityCheck
@@ -215,6 +237,7 @@ jobs:
           ./corso backup create onedrive \
             --hide-progress \
             --user "${CORSO_M365_TEST_USER_ID}" \
+            --enable-permissions-backup \
             --json \
             2>&1 | tee $TEST_RESULT/backup_onedrive.txt
@@ -263,6 +286,7 @@ jobs:
         run: |
           set -euo pipefail
           ./corso restore onedrive \
+            --restore-permissions \
             --hide-progress \
             --backup "${{ steps.onedrive-test.outputs.result }}" \
             2>&1 | tee $TEST_RESULT/onedrive-restore-test.txt
@@ -283,7 +307,8 @@ jobs:
           set -euo pipefail
           ./corso backup create onedrive \
             --hide-progress \
-            --user "${CORSO_M365_TEST_USER_ID}"\
+            --user "${CORSO_M365_TEST_USER_ID}" \
+            --enable-permissions-backup \
             --json \
             2>&1 | tee $TEST_RESULT/backup_onedrive_incremental.txt
@@ -303,6 +328,7 @@ jobs:
         run: |
           set -euo pipefail
           ./corso restore onedrive \
+            --restore-permissions \
             --hide-progress \
             --backup "${{ steps.onedrive-incremental-test.outputs.result }}" \
             2>&1 | tee $TEST_RESULT/onedrive-incremental-restore-test.txt

View File

@@ -13,15 +13,21 @@ import (
 	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/microsoftgraph/msgraph-sdk-go/users"
-	"golang.org/x/exp/maps"
 	"golang.org/x/exp/slices"

 	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/logger"
 )

+type permissionInfo struct {
+	entityID string
+	roles    []string
+}
+
 func main() {
 	ctx, log := logger.Seed(context.Background(), "info", logger.GetLogFile(""))
 	defer func() {
@@ -29,7 +35,7 @@ func main() {
 	}()

 	adapter, err := graph.CreateAdapter(
-		os.Getenv("AZURE_TENANT_ID"),
+		tester.GetM365TenantID(ctx),
 		os.Getenv("AZURE_CLIENT_ID"),
 		os.Getenv("AZURE_CLIENT_SECRET"))
 	if err != nil {
@@ -37,11 +43,13 @@ func main() {
 	}

 	var (
 		client      = msgraphsdk.NewGraphServiceClient(adapter)
-		testUser    = os.Getenv("CORSO_M365_TEST_USER_ID")
+		testUser    = tester.GetM365UserID(ctx)
 		testService = os.Getenv("SANITY_RESTORE_SERVICE")
 		folder      = strings.TrimSpace(os.Getenv("SANITY_RESTORE_FOLDER"))
 		startTime, _ = mustGetTimeFromName(ctx, folder)
+		dataFolder       = os.Getenv("TEST_DATA")
+		baseBackupFolder = os.Getenv("BASE_BACKUP")
 	)

 	ctx = clues.Add(
@@ -55,7 +63,7 @@ func main() {

 	switch testService {
 	case "exchange":
-		checkEmailRestoration(ctx, client, testUser, folder, startTime)
+		checkEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
 	case "onedrive":
 		checkOnedriveRestoration(ctx, client, testUser, folder, startTime)
 	default:
@@ -68,13 +76,14 @@ func main() {
 func checkEmailRestoration(
 	ctx context.Context,
 	client *msgraphsdk.GraphServiceClient,
-	testUser, folderName string,
+	testUser, folderName, dataFolder, baseBackupFolder string,
 	startTime time.Time,
 ) {
 	var (
-		itemCount     = make(map[string]int32)
-		restoreFolder models.MailFolderable
-		builder       = client.UsersById(testUser).MailFolders()
+		restoreFolder    models.MailFolderable
+		itemCount        = make(map[string]int32)
+		restoreItemCount = make(map[string]int32)
+		builder          = client.UsersById(testUser).MailFolders()
 	)

 	for {
@@ -85,29 +94,20 @@ func checkEmailRestoration(
 		values := result.GetValue()

+		// recursive restore folder discovery before proceeding with tests
 		for _, v := range values {
-			var (
-				itemID   = ptr.Val(v.GetId())
-				itemName = ptr.Val(v.GetDisplayName())
-				ictx     = clues.Add(ctx, "item_id", itemID, "item_name", itemName)
-				folderTime, hasTime = mustGetTimeFromName(ctx, itemName)
-			)
-
-			if !isWithinTimeBound(ictx, startTime, folderTime, hasTime) {
-				continue
-			}
-
-			// if we found the folder to testt against, back out of this loop.
+			itemName := ptr.Val(v.GetDisplayName())
+
 			if itemName == folderName {
 				restoreFolder = v
 				continue
 			}

-			// otherwise, recursively aggregate all child folders.
-			getAllSubFolder(ctx, client, testUser, v, itemName, itemCount)
-
-			itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
+			if itemName == dataFolder || itemName == baseBackupFolder {
+				// otherwise, recursively aggregate all child folders.
+				getAllSubFolder(ctx, client, testUser, v, itemName, dataFolder, itemCount)
+
+				itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
+			}
 		}

 		link, ok := ptr.ValOK(result.GetOdataNextLink())
@@ -135,28 +135,36 @@ func checkEmailRestoration(
 	}

 	for _, fld := range childFolder.GetValue() {
-		var (
-			fldID   = ptr.Val(fld.GetId())
-			fldName = ptr.Val(fld.GetDisplayName())
-			count   = ptr.Val(fld.GetTotalItemCount())
-			ictx    = clues.Add(
-				ctx,
-				"child_folder_id", fldID,
-				"child_folder_name", fldName,
-				"expected_count", itemCount[fldName],
-				"actual_count", count)
-		)
-
-		if itemCount[fldName] != count {
-			logger.Ctx(ictx).Error("test failure: Restore item counts do not match")
-			fmt.Println("Restore item counts do not match:")
-			fmt.Println("* expected:", itemCount[fldName])
-			fmt.Println("* actual:", count)
-			fmt.Println("Folder:", fldName, ptr.Val(fld.GetId()))
-
-			os.Exit(1)
-		}
-
-		checkAllSubFolder(ctx, client, testUser, fld, fldName, itemCount)
+		restoreDisplayName := ptr.Val(fld.GetDisplayName())
+
+		// check if folder is the data folder we loaded or the base backup to verify
+		// the incremental backup worked fine
+		if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
+			count, _ := ptr.ValOK(fld.GetTotalItemCount())
+
+			restoreItemCount[restoreDisplayName] = count
+			checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
+		}
+	}
+
+	verifyEmailData(ctx, restoreItemCount, itemCount)
+}
+
+func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
+	for fldName, emailCount := range messageCount {
+		if restoreMessageCount[fldName] != emailCount {
+			logger.Ctx(ctx).Errorw(
+				"test failure: Restore item counts do not match",
+				"expected:", emailCount,
+				"actual:", restoreMessageCount[fldName])
+			fmt.Println(
+				"test failure: Restore item counts do not match",
+				"* expected:", emailCount,
+				"* actual:", restoreMessageCount[fldName])
+
+			os.Exit(1)
+		}
 	}
 }
@@ -167,7 +175,8 @@ func getAllSubFolder(
 	client *msgraphsdk.GraphServiceClient,
 	testUser string,
 	r models.MailFolderable,
-	parentFolder string,
+	parentFolder,
+	dataFolder string,
 	messageCount map[string]int32,
 ) {
 	var (
@@ -195,16 +204,18 @@ func getAllSubFolder(
 		var (
 			childDisplayName = ptr.Val(child.GetDisplayName())
 			childFolderCount = ptr.Val(child.GetChildFolderCount())
-			fullFolderName   = parentFolder + "/" + childDisplayName
+			//nolint:forbidigo
+			fullFolderName = path.Join(parentFolder, childDisplayName)
 		)

-		messageCount[fullFolderName], _ = ptr.ValOK(child.GetTotalItemCount())
-
-		// recursively check for subfolders
-		if childFolderCount > 0 {
-			parentFolder := fullFolderName
-			getAllSubFolder(ctx, client, testUser, child, parentFolder, messageCount)
+		if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
+			messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
+
+			// recursively check for subfolders
+			if childFolderCount > 0 {
+				parentFolder := fullFolderName
+				getAllSubFolder(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
+			}
 		}
 	}
 }
@@ -214,10 +225,11 @@ func getAllSubFolder(
 func checkAllSubFolder(
 	ctx context.Context,
 	client *msgraphsdk.GraphServiceClient,
-	testUser string,
 	r models.MailFolderable,
-	parentFolder string,
-	messageCount map[string]int32,
+	testUser,
+	parentFolder,
+	dataFolder string,
+	restoreMessageCount map[string]int32,
 ) {
 	var (
 		folderID = ptr.Val(r.GetId())
@@ -241,23 +253,20 @@ func checkAllSubFolder(
 	for _, child := range childFolder.GetValue() {
 		var (
 			childDisplayName = ptr.Val(child.GetDisplayName())
-			childTotalCount  = ptr.Val(child.GetTotalItemCount())
 			//nolint:forbidigo
 			fullFolderName = path.Join(parentFolder, childDisplayName)
 		)

-		if messageCount[fullFolderName] != childTotalCount {
-			fmt.Println("Message count doesn't match:")
-			fmt.Println("* expected:", messageCount[fullFolderName])
-			fmt.Println("* actual:", childTotalCount)
-			fmt.Println("Item:", fullFolderName, folderID)
-
-			os.Exit(1)
+		if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
+			childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
+			restoreMessageCount[fullFolderName] = childTotalCount
 		}

 		childFolderCount := ptr.Val(child.GetChildFolderCount())

 		if childFolderCount > 0 {
-			checkAllSubFolder(ctx, client, testUser, child, fullFolderName, messageCount)
+			parentFolder := fullFolderName
+			checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
 		}
 	}
 }
@@ -265,14 +274,17 @@ func checkAllSubFolder(
 func checkOnedriveRestoration(
 	ctx context.Context,
 	client *msgraphsdk.GraphServiceClient,
-	testUser, folderName string,
+	testUser,
+	folderName string,
 	startTime time.Time,
 ) {
 	var (
 		// map itemID -> item size
 		fileSizes = make(map[string]int64)
 		// map itemID -> permission id -> []permission roles
-		folderPermission = make(map[string]map[string][]string)
+		folderPermission        = make(map[string][]permissionInfo)
+		restoreFile             = make(map[string]int64)
+		restoreFolderPermission = make(map[string][]permissionInfo)
 	)

 	drive, err := client.
@@ -313,7 +325,6 @@ func checkOnedriveRestoration(
 		}

 		folderTime, hasTime := mustGetTimeFromName(ictx, itemName)
-
 		if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) {
 			continue
 		}
@@ -323,21 +334,185 @@ func checkOnedriveRestoration(
 			fileSizes[itemName] = ptr.Val(driveItem.GetSize())
 		}

-		folderPermission[itemID] = permissionsIn(ctx, client, driveID, itemID, folderPermission[itemID])
+		if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
+			continue
+		}
+
+		// currently we don't restore blank folders.
+		// skip permission check for empty folders
+		if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
+			logger.Ctx(ctx).Info("skipped empty folder: ", itemName)
+			fmt.Println("skipped empty folder: ", itemName)
+
+			continue
+		}
+
+		permissionIn(ctx, client, driveID, itemID, itemName, folderPermission)
+		getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermission, startTime)
 	}

-	checkFileData(ctx, client, driveID, restoreFolderID, fileSizes, folderPermission)
+	getRestoreData(ctx, client, *drive.GetId(), restoreFolderID, restoreFile, restoreFolderPermission, startTime)
+
+	for folderName, permissions := range folderPermission {
+		logger.Ctx(ctx).Info("checking for folder: %s \n", folderName)
+		fmt.Printf("checking for folder: %s \n", folderName)
+
+		restoreFolderPerm := restoreFolderPermission[folderName]
+
+		if len(permissions) < 1 {
+			logger.Ctx(ctx).Info("no permissions found for folder :", folderName)
+			fmt.Println("no permissions found for folder :", folderName)
+
+			continue
+		}
+
+		if len(restoreFolderPerm) < 1 {
+			logger.Ctx(ctx).Info("permission roles are not equal for :",
+				"Item:", folderName,
+				"* Permission found: ", permissions,
+				"* blank permission found in restore.")
+			fmt.Println("permission roles are not equal for:")
+			fmt.Println("Item:", folderName)
+			fmt.Println("* Permission found: ", permissions)
+			fmt.Println("blank permission found in restore.")
+
+			os.Exit(1)
+		}
+
+		for i, orginalPerm := range permissions {
+			restorePerm := restoreFolderPerm[i]
+
+			if !(orginalPerm.entityID != restorePerm.entityID) &&
+				!slices.Equal(orginalPerm.roles, restorePerm.roles) {
+				logger.Ctx(ctx).Info("permission roles are not equal for :",
+					"Item:", folderName,
+					"* Original permission: ", orginalPerm.entityID,
+					"* Restored permission: ", restorePerm.entityID)
+				fmt.Println("permission roles are not equal for:")
+				fmt.Println("Item:", folderName)
+				fmt.Println("* Original permission: ", orginalPerm.entityID)
+				fmt.Println("* Restored permission: ", restorePerm.entityID)
+
+				os.Exit(1)
+			}
+		}
+	}
+
+	for fileName, fileSize := range fileSizes {
+		if fileSize != restoreFile[fileName] {
+			logger.Ctx(ctx).Info("File size does not match for:",
+				"Item:", fileName,
+				"* expected:", fileSize,
+				"* actual:", restoreFile[fileName])
+			fmt.Println("File size does not match for:")
+			fmt.Println("item:", fileName)
+			fmt.Println("* expected:", fileSize)
+			fmt.Println("* actual:", restoreFile[fileName])
+
+			os.Exit(1)
+		}
+	}

 	fmt.Println("Success")
 }

-func checkFileData(
+func getOneDriveChildFolder(
 	ctx context.Context,
 	client *msgraphsdk.GraphServiceClient,
-	driveID,
-	restoreFolderID string,
+	driveID, itemID, parentName string,
 	fileSizes map[string]int64,
-	folderPermission map[string]map[string][]string,
+	folderPermission map[string][]permissionInfo,
+	startTime time.Time,
+) {
+	response, err := client.DrivesById(driveID).ItemsById(itemID).Children().Get(ctx, nil)
+	if err != nil {
+		fatal(ctx, "getting child folder", err)
+	}
+
+	for _, driveItem := range response.GetValue() {
+		var (
+			itemID   = ptr.Val(driveItem.GetId())
+			itemName = ptr.Val(driveItem.GetName())
+			fullName = parentName + "/" + itemName
+		)
+
+		folderTime, hasTime := mustGetTimeFromName(ctx, itemName)
+		if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) {
+			continue
+		}
+
+		// if it's a file check the size
+		if driveItem.GetFile() != nil {
+			fileSizes[fullName] = ptr.Val(driveItem.GetSize())
+		}
+
+		if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
+			continue
+		}
+
+		// currently we don't restore blank folders.
+		// skip permission check for empty folders
+		if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
+			logger.Ctx(ctx).Info("skipped empty folder: ", fullName)
+			fmt.Println("skipped empty folder: ", fullName)
+
+			continue
+		}
+
+		permissionIn(ctx, client, driveID, itemID, fullName, folderPermission)
+		getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
+	}
+}
+
+func permissionIn(
+	ctx context.Context,
+	client *msgraphsdk.GraphServiceClient,
+	driveID, itemID, folderName string,
+	permMap map[string][]permissionInfo,
+) {
+	permMap[folderName] = []permissionInfo{}
+
+	pcr, err := client.
+		DrivesById(driveID).
+		ItemsById(itemID).
+		Permissions().
+		Get(ctx, nil)
+	if err != nil {
+		fatal(ctx, "getting permission", err)
+	}
+
+	for _, perm := range pcr.GetValue() {
+		if perm.GetGrantedToV2() == nil {
+			continue
+		}
+
+		var (
+			gv2     = perm.GetGrantedToV2()
+			perInfo = permissionInfo{}
+		)
+
+		if gv2.GetUser() != nil {
+			perInfo.entityID = ptr.Val(gv2.GetUser().GetId())
+		} else if gv2.GetGroup() != nil {
+			perInfo.entityID = ptr.Val(gv2.GetGroup().GetId())
+		}
+
+		perInfo.roles = perm.GetRoles()
+		slices.Sort(perInfo.roles)
+
+		permMap[folderName] = append(permMap[folderName], perInfo)
+	}
+}
+
+func getRestoreData(
+	ctx context.Context,
+	client *msgraphsdk.GraphServiceClient,
+	driveID, restoreFolderID string,
+	restoreFile map[string]int64,
+	restoreFolder map[string][]permissionInfo,
+	startTime time.Time,
 ) {
 	restored, err := client.
 		DrivesById(driveID).
@@ -356,14 +531,7 @@ func checkFileData(
 		)

 		if item.GetFile() != nil {
-			if itemSize != fileSizes[itemName] {
-				fmt.Println("File size does not match:")
-				fmt.Println("* expected:", fileSizes[itemName])
-				fmt.Println("* actual:", itemSize)
-				fmt.Println("Item:", itemName, itemID)
-
-				os.Exit(1)
-			}
+			restoreFile[itemName] = itemSize

 			continue
 		}
@@ -371,23 +539,8 @@ func checkFileData(
 			continue
 		}

-		var (
-			expectItem = folderPermission[itemID]
-			results    = permissionsIn(ctx, client, driveID, itemID, nil)
-		)
-
-		for pid, result := range results {
-			expect := expectItem[pid]
-
-			if !slices.Equal(expect, result) {
-				fmt.Println("permissions are not equal")
-				fmt.Println("* expected: ", expect)
-				fmt.Println("* actual: ", result)
-				fmt.Println("Item:", itemName, itemID)
-				fmt.Println("Permission:", pid)
-
-				os.Exit(1)
-			}
-		}
+		permissionIn(ctx, client, driveID, itemID, itemName, restoreFolder)
+		getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime)
 	}
 }
@@ -401,41 +554,6 @@ func fatal(ctx context.Context, msg string, err error) {
 	os.Exit(1)
 }

-func permissionsIn(
-	ctx context.Context,
-	client *msgraphsdk.GraphServiceClient,
-	driveID, itemID string,
-	init map[string][]string,
-) map[string][]string {
-	result := map[string][]string{}
-
-	pcr, err := client.
-		DrivesById(driveID).
-		ItemsById(itemID).
-		Permissions().
-		Get(ctx, nil)
-	if err != nil {
-		fatal(ctx, "getting permission", err)
-	}
-
-	if len(init) > 0 {
-		maps.Copy(result, init)
-	}
-
-	for _, p := range pcr.GetValue() {
-		var (
-			pid   = ptr.Val(p.GetId())
-			roles = p.GetRoles()
-		)
-
-		slices.Sort(roles)
-
-		result[pid] = roles
-	}
-
-	return result
-}
-
 func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
 	t, err := common.ExtractTime(name)
 	if err != nil && !errors.Is(err, common.ErrNoTimeString) {
@@ -445,17 +563,15 @@ func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
 	return t, !errors.Is(err, common.ErrNoTimeString)
 }

-func isWithinTimeBound(ctx context.Context, bound, check time.Time, skip bool) bool {
-	if skip {
-		return true
-	}
+func isWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool {
+	if hasTime {
+		if bound.Before(check) {
+			logger.Ctx(ctx).
+				With("boundary_time", bound, "check_time", check).
+				Info("skipping restore folder: not older than time bound")

-	if bound.Before(check) {
-		logger.Ctx(ctx).
-			With("boundary_time", bound, "check_time", check).
-			Info("skipping restore folder: not older than time bound")
-
-		return false
+			return false
+		}
 	}

 	return true

View File

@@ -53,6 +53,7 @@ var (
 	dateOnlyRE   = regexp.MustCompile(`.*(\d{4}-\d{2}-\d{2}).*`)
 	legacyTimeRE = regexp.MustCompile(
 		`.*(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}?([Zz]|[a-zA-Z]{2}|([\+|\-]([01]\d|2[0-3])))).*`)
+	simpleTimeTestingRE      = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}.\d{6}).*`)
 	simpleDateTimeRE         = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}:\d{2}:\d{2}).*`)
 	simpleDateTimeOneDriveRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}).*`)
 	standardTimeRE           = regexp.MustCompile(
@@ -65,6 +66,7 @@ var (
 	// get eagerly chosen as the parsable format, slicing out some data.
 	formats = []TimeFormat{
 		StandardTime,
+		SimpleTimeTesting,
 		SimpleDateTime,
 		SimpleDateTimeOneDrive,
 		LegacyTime,
@@ -75,6 +77,7 @@ var (
 	}

 	regexes = []*regexp.Regexp{
 		standardTimeRE,
+		simpleTimeTestingRE,
 		simpleDateTimeRE,
 		simpleDateTimeOneDriveRE,
 		legacyTimeRE,
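
For reference, a small, hypothetical example (not part of the PR) of the folder-name timestamp the new simpleTimeTestingRE is meant to capture; the sample name is made up, loosely following the Corso_Restore_st_* destinations used in the workflow above:

```go
package main

import (
	"fmt"
	"regexp"
)

// same pattern as the simpleTimeTestingRE added above
var simpleTimeTestingRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}.\d{6}).*`)

func main() {
	// hypothetical restore-destination folder name
	name := "Corso_Restore_st_03-Apr-2023_16-13-48.123456"

	if m := simpleTimeTestingRE.FindStringSubmatch(name); m != nil {
		fmt.Println("extracted timestamp:", m[1]) // 03-Apr-2023_16-13-48.123456
	}
}
```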

View File

@@ -1,12 +1,15 @@
 package tester

 import (
+	"context"
 	"os"
 	"strings"
 	"testing"

 	"github.com/alcionai/clues"
 	"github.com/stretchr/testify/require"
+
+	"github.com/alcionai/corso/src/pkg/logger"
 )

 // M365TenantID returns a tenantID string representing the azureTenantID described
@@ -15,7 +18,20 @@ import (
 // last-attempt fallback that will only work on alcion's testing org.
 func M365TenantID(t *testing.T) string {
 	cfg, err := readTestConfig()
-	require.NoError(t, err, "retrieving m365 user id from test configuration", clues.ToCore(err))
+	require.NoError(t, err, "retrieving m365 tenant ID from test configuration", clues.ToCore(err))
+
+	return cfg[TestCfgAzureTenantID]
+}
+
+// M365TenantID returns a tenantID string representing the azureTenantID described
+// by either the env var AZURE_TENANT_ID, the corso_test.toml config
+// file or the default value (in that order of priority). The default is a
+// last-attempt fallback that will only work on alcion's testing org.
+func GetM365TenantID(ctx context.Context) string {
+	cfg, err := readTestConfig()
+	if err != nil {
+		logger.Ctx(ctx).Error(err, "retrieving m365 tenant ID from test configuration")
+	}

 	return cfg[TestCfgAzureTenantID]
 }
@@ -31,6 +47,19 @@ func M365UserID(t *testing.T) string {
 	return cfg[TestCfgUserID]
 }

+// GetM365UserID returns an userID string representing the m365UserID described
+// by either the env var CORSO_M365_TEST_USER_ID, the corso_test.toml config
+// file or the default value (in that order of priority). The default is a
+// last-attempt fallback that will only work on alcion's testing org.
+func GetM365UserID(ctx context.Context) string {
+	cfg, err := readTestConfig()
+	if err != nil {
+		logger.Ctx(ctx).Error(err, "retrieving m365 user id from test configuration")
+	}
+
+	return cfg[TestCfgUserID]
+}
+
 // SecondaryM365UserID returns an userID string representing the m365UserID
 // described by either the env var CORSO_SECONDARY_M365_TEST_USER_ID, the
 // corso_test.toml config file or the default value (in that order of priority).