From 4087b69a39e605d84297d9dc5a9e66987fb1f49d Mon Sep 17 00:00:00 2001 From: neha_gupta Date: Mon, 3 Apr 2023 16:13:48 +0530 Subject: [PATCH] Enhancement Sanity Test: verify permissions and defined data (#2938) onedrive - - verify permissions - grantedToV2 and roles - recursively check subfolder's data and permission Exchange - - since the data in email is huge, create custom data and check if that data is getting restored properly #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [x] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- .github/workflows/sanity-test.yaml | 34 ++- src/cmd/sanity_test/sanity_tests.go | 404 ++++++++++++++++--------- src/internal/common/time.go | 3 + src/internal/tester/resource_owners.go | 31 +- 4 files changed, 323 insertions(+), 149 deletions(-) diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml index 2bce1ca57..57f988fe3 100644 --- a/.github/workflows/sanity-test.yaml +++ b/.github/workflows/sanity-test.yaml @@ -97,15 +97,32 @@ jobs: exit 1 fi + # generate new entries to roll into the next load test + # only runs if the test was successful + - name: New Data Creation + working-directory: ./src/cmd/factory + env: + AZURE_CLIENT_ID: ${{ secrets.CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }} + AZURE_TENANT_ID: ${{ secrets.TENANT_ID }} + CORSO_M365_LOAD_TEST_USER_ID: ${{ secrets.CORSO_M365_LOAD_TEST_USER_ID }} + run: | + go run . 
exchange emails \ + --user ${{ env.CORSO_M365_TEST_USER_ID }} \ + --tenant ${{ env.AZURE_TENANT_ID }} \ + --destination Corso_Restore_st_${{ steps.repo-init.outputs.result }} \ + --count 4 + # run the tests - name: Backup exchange test id: exchange-test run: | ./corso backup create exchange \ --user "${CORSO_M365_TEST_USER_ID}" \ - --hide-progress \ - --json \ - 2>&1 | tee $TEST_RESULT/backup_exchange.txt + --hide-progress \ + --data 'email' \ + --json \ + 2>&1 | tee $TEST_RESULT/backup_exchange.txt resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_exchange.txt ) @@ -152,6 +169,7 @@ jobs: run: | set -euo pipefail ./corso restore exchange \ + --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \ --hide-progress \ --backup "${{ steps.exchange-test.outputs.result }}" \ 2>&1 | tee $TEST_RESULT/exchange-restore-test.txt @@ -161,6 +179,7 @@ jobs: env: SANITY_RESTORE_FOLDER: ${{ steps.exchange-restore-test.outputs.result }} SANITY_RESTORE_SERVICE: "exchange" + TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }} run: | set -euo pipefail ./sanityCheck @@ -193,6 +212,7 @@ jobs: ./corso restore exchange \ --hide-progress \ --backup "${{ steps.exchange-incremental-test.outputs.result }}" \ + --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \ 2>&1 | tee $TEST_RESULT/exchange-incremantal-restore-test.txt echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/exchange-incremantal-restore-test.txt | sed "s/Restoring to folder//" ) >> $GITHUB_OUTPUT @@ -200,6 +220,8 @@ jobs: env: SANITY_RESTORE_FOLDER: ${{ steps.exchange-incremantal-restore-test.outputs.result }} SANITY_RESTORE_SERVICE: "exchange" + TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }} + BASE_BACKUP: ${{ steps.exchange-restore-test.outputs.result }} run: | set -euo pipefail ./sanityCheck @@ -215,6 +237,7 @@ jobs: ./corso backup create onedrive \ --hide-progress \ --user "${CORSO_M365_TEST_USER_ID}" \ + --enable-permissions-backup 
\ --json \ 2>&1 | tee $TEST_RESULT/backup_onedrive.txt @@ -263,6 +286,7 @@ jobs: run: | set -euo pipefail ./corso restore onedrive \ + --restore-permissions \ --hide-progress \ --backup "${{ steps.onedrive-test.outputs.result }}" \ 2>&1 | tee $TEST_RESULT/onedrive-restore-test.txt @@ -283,7 +307,8 @@ jobs: set -euo pipefail ./corso backup create onedrive \ --hide-progress \ - --user "${CORSO_M365_TEST_USER_ID}"\ + --user "${CORSO_M365_TEST_USER_ID}" \ + --enable-permissions-backup \ --json \ 2>&1 | tee $TEST_RESULT/backup_onedrive_incremental.txt @@ -303,6 +328,7 @@ jobs: run: | set -euo pipefail ./corso restore onedrive \ + --restore-permissions \ --hide-progress \ --backup "${{ steps.onedrive-incremental-test.outputs.result }}" \ 2>&1 | tee $TEST_RESULT/onedrive-incremental-restore-test.txt diff --git a/src/cmd/sanity_test/sanity_tests.go b/src/cmd/sanity_test/sanity_tests.go index 82f0f1002..40c4acbfc 100644 --- a/src/cmd/sanity_test/sanity_tests.go +++ b/src/cmd/sanity_test/sanity_tests.go @@ -13,15 +13,21 @@ import ( msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/users" - "golang.org/x/exp/maps" "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/logger" ) +type permissionInfo struct { + entityID string + roles []string +} + func main() { ctx, log := logger.Seed(context.Background(), "info", logger.GetLogFile("")) defer func() { @@ -29,7 +35,7 @@ func main() { }() adapter, err := graph.CreateAdapter( - os.Getenv("AZURE_TENANT_ID"), + tester.GetM365TenantID(ctx), os.Getenv("AZURE_CLIENT_ID"), os.Getenv("AZURE_CLIENT_SECRET")) if err != nil { @@ -37,11 +43,13 @@ func main() { } var ( - client = 
msgraphsdk.NewGraphServiceClient(adapter) - testUser = os.Getenv("CORSO_M365_TEST_USER_ID") - testService = os.Getenv("SANITY_RESTORE_SERVICE") - folder = strings.TrimSpace(os.Getenv("SANITY_RESTORE_FOLDER")) - startTime, _ = mustGetTimeFromName(ctx, folder) + client = msgraphsdk.NewGraphServiceClient(adapter) + testUser = tester.GetM365UserID(ctx) + testService = os.Getenv("SANITY_RESTORE_SERVICE") + folder = strings.TrimSpace(os.Getenv("SANITY_RESTORE_FOLDER")) + startTime, _ = mustGetTimeFromName(ctx, folder) + dataFolder = os.Getenv("TEST_DATA") + baseBackupFolder = os.Getenv("BASE_BACKUP") ) ctx = clues.Add( @@ -55,7 +63,7 @@ func main() { switch testService { case "exchange": - checkEmailRestoration(ctx, client, testUser, folder, startTime) + checkEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime) case "onedrive": checkOnedriveRestoration(ctx, client, testUser, folder, startTime) default: @@ -68,13 +76,14 @@ func main() { func checkEmailRestoration( ctx context.Context, client *msgraphsdk.GraphServiceClient, - testUser, folderName string, + testUser, folderName, dataFolder, baseBackupFolder string, startTime time.Time, ) { var ( - itemCount = make(map[string]int32) - restoreFolder models.MailFolderable - builder = client.UsersById(testUser).MailFolders() + restoreFolder models.MailFolderable + itemCount = make(map[string]int32) + restoreItemCount = make(map[string]int32) + builder = client.UsersById(testUser).MailFolders() ) for { @@ -85,29 +94,20 @@ func checkEmailRestoration( values := result.GetValue() - // recursive restore folder discovery before proceeding with tests for _, v := range values { - var ( - itemID = ptr.Val(v.GetId()) - itemName = ptr.Val(v.GetDisplayName()) - ictx = clues.Add(ctx, "item_id", itemID, "item_name", itemName) - folderTime, hasTime = mustGetTimeFromName(ctx, itemName) - ) + itemName := ptr.Val(v.GetDisplayName()) - if !isWithinTimeBound(ictx, startTime, folderTime, hasTime) { - continue - } 
- - // if we found the folder to testt against, back out of this loop. if itemName == folderName { restoreFolder = v continue } - // otherwise, recursively aggregate all child folders. - getAllSubFolder(ctx, client, testUser, v, itemName, itemCount) + if itemName == dataFolder || itemName == baseBackupFolder { + // otherwise, recursively aggregate all child folders. + getAllSubFolder(ctx, client, testUser, v, itemName, dataFolder, itemCount) - itemCount[itemName] = ptr.Val(v.GetTotalItemCount()) + itemCount[itemName] = ptr.Val(v.GetTotalItemCount()) + } } link, ok := ptr.ValOK(result.GetOdataNextLink()) @@ -135,28 +135,36 @@ func checkEmailRestoration( } for _, fld := range childFolder.GetValue() { - var ( - fldID = ptr.Val(fld.GetId()) - fldName = ptr.Val(fld.GetDisplayName()) - count = ptr.Val(fld.GetTotalItemCount()) - ictx = clues.Add( - ctx, - "child_folder_id", fldID, - "child_folder_name", fldName, - "expected_count", itemCount[fldName], - "actual_count", count) - ) + restoreDisplayName := ptr.Val(fld.GetDisplayName()) + + // check if folder is the data folder we loaded or the base backup to verify + // the incremental backup worked fine + if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) { + count, _ := ptr.ValOK(fld.GetTotalItemCount()) + + restoreItemCount[restoreDisplayName] = count + checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount) + } + } + + verifyEmailData(ctx, restoreItemCount, itemCount) +} + +func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) { + for fldName, emailCount := range messageCount { + if restoreMessageCount[fldName] != emailCount { + logger.Ctx(ctx).Errorw( + "test failure: Restore item counts do not match", + "expected:", emailCount, + "actual:", restoreMessageCount[fldName]) + + fmt.Println( + "test failure: Restore item counts do not match", + "* expected:", emailCount, + "* actual:", 
restoreMessageCount[fldName]) - if itemCount[fldName] != count { - logger.Ctx(ictx).Error("test failure: Restore item counts do not match") - fmt.Println("Restore item counts do not match:") - fmt.Println("* expected:", itemCount[fldName]) - fmt.Println("* actual:", count) - fmt.Println("Folder:", fldName, ptr.Val(fld.GetId())) os.Exit(1) } - - checkAllSubFolder(ctx, client, testUser, fld, fldName, itemCount) } } @@ -167,7 +175,8 @@ func getAllSubFolder( client *msgraphsdk.GraphServiceClient, testUser string, r models.MailFolderable, - parentFolder string, + parentFolder, + dataFolder string, messageCount map[string]int32, ) { var ( @@ -195,16 +204,18 @@ func getAllSubFolder( var ( childDisplayName = ptr.Val(child.GetDisplayName()) childFolderCount = ptr.Val(child.GetChildFolderCount()) - fullFolderName = parentFolder + "/" + childDisplayName + //nolint:forbidigo + fullFolderName = path.Join(parentFolder, childDisplayName) ) - messageCount[fullFolderName], _ = ptr.ValOK(child.GetTotalItemCount()) + if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { + messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount()) + // recursively check for subfolders + if childFolderCount > 0 { + parentFolder := fullFolderName - // recursively check for subfolders - if childFolderCount > 0 { - parentFolder := fullFolderName - - getAllSubFolder(ctx, client, testUser, child, parentFolder, messageCount) + getAllSubFolder(ctx, client, testUser, child, parentFolder, dataFolder, messageCount) + } } } } @@ -214,10 +225,11 @@ func getAllSubFolder( func checkAllSubFolder( ctx context.Context, client *msgraphsdk.GraphServiceClient, - testUser string, r models.MailFolderable, - parentFolder string, - messageCount map[string]int32, + testUser, + parentFolder, + dataFolder string, + restoreMessageCount map[string]int32, ) { var ( folderID = ptr.Val(r.GetId()) @@ -241,23 +253,20 @@ func checkAllSubFolder( for _, child := range childFolder.GetValue() { var ( 
childDisplayName = ptr.Val(child.GetDisplayName()) - childTotalCount = ptr.Val(child.GetTotalItemCount()) //nolint:forbidigo fullFolderName = path.Join(parentFolder, childDisplayName) ) - if messageCount[fullFolderName] != childTotalCount { - fmt.Println("Message count doesn't match:") - fmt.Println("* expected:", messageCount[fullFolderName]) - fmt.Println("* actual:", childTotalCount) - fmt.Println("Item:", fullFolderName, folderID) - os.Exit(1) + if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { + childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount()) + restoreMessageCount[fullFolderName] = childTotalCount } childFolderCount := ptr.Val(child.GetChildFolderCount()) if childFolderCount > 0 { - checkAllSubFolder(ctx, client, testUser, child, fullFolderName, messageCount) + parentFolder := fullFolderName + checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount) } } } @@ -265,14 +274,17 @@ func checkAllSubFolder( func checkOnedriveRestoration( ctx context.Context, client *msgraphsdk.GraphServiceClient, - testUser, folderName string, + testUser, + folderName string, startTime time.Time, ) { var ( // map itemID -> item size fileSizes = make(map[string]int64) // map itemID -> permission id -> []permission roles - folderPermission = make(map[string]map[string][]string) + folderPermission = make(map[string][]permissionInfo) + restoreFile = make(map[string]int64) + restoreFolderPermission = make(map[string][]permissionInfo) ) drive, err := client. 
@@ -313,7 +325,6 @@ func checkOnedriveRestoration( } folderTime, hasTime := mustGetTimeFromName(ictx, itemName) - if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) { continue } @@ -323,21 +334,185 @@ func checkOnedriveRestoration( fileSizes[itemName] = ptr.Val(driveItem.GetSize()) } - folderPermission[itemID] = permissionsIn(ctx, client, driveID, itemID, folderPermission[itemID]) + if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil { + continue + } + + // currently we don't restore blank folders. + // skip permission check for empty folders + if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 { + logger.Ctx(ctx).Info("skipped empty folder: ", itemName) + fmt.Println("skipped empty folder: ", itemName) + + continue + } + + permissionIn(ctx, client, driveID, itemID, itemName, folderPermission) + getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermission, startTime) } - checkFileData(ctx, client, driveID, restoreFolderID, fileSizes, folderPermission) + getRestoreData(ctx, client, *drive.GetId(), restoreFolderID, restoreFile, restoreFolderPermission, startTime) + + for folderName, permissions := range folderPermission { + logger.Ctx(ctx).Info("checking for folder: %s \n", folderName) + fmt.Printf("checking for folder: %s \n", folderName) + + restoreFolderPerm := restoreFolderPermission[folderName] + + if len(permissions) < 1 { + logger.Ctx(ctx).Info("no permissions found for folder :", folderName) + fmt.Println("no permissions found for folder :", folderName) + + continue + } + + if len(restoreFolderPerm) < 1 { + logger.Ctx(ctx).Info("permission roles are not equal for :", + "Item:", folderName, + "* Permission found: ", permissions, + "* blank permission found in restore.") + + fmt.Println("permission roles are not equal for:") + fmt.Println("Item:", folderName) + fmt.Println("* Permission found: ", permissions) + fmt.Println("blank permission found in restore.") + + os.Exit(1) + } + + for i, orginalPerm := 
range permissions {
+			restorePerm := restoreFolderPerm[i]
+
+			if orginalPerm.entityID != restorePerm.entityID ||
+				!slices.Equal(orginalPerm.roles, restorePerm.roles) {
+				logger.Ctx(ctx).Info("permission roles are not equal for :",
+					"Item:", folderName,
+					"* Original permission: ", orginalPerm.entityID,
+					"* Restored permission: ", restorePerm.entityID)
+
+				fmt.Println("permission roles are not equal for:")
+				fmt.Println("Item:", folderName)
+				fmt.Println("* Original permission: ", orginalPerm.entityID)
+				fmt.Println("* Restored permission: ", restorePerm.entityID)
+				os.Exit(1)
+			}
+		}
+	}
+
+	for fileName, fileSize := range fileSizes {
+		if fileSize != restoreFile[fileName] {
+			logger.Ctx(ctx).Info("File size does not match for:",
+				"Item:", fileName,
+				"* expected:", fileSize,
+				"* actual:", restoreFile[fileName])
+
+			fmt.Println("File size does not match for:")
+			fmt.Println("item:", fileName)
+			fmt.Println("* expected:", fileSize)
+			fmt.Println("* actual:", restoreFile[fileName])
+			os.Exit(1)
+		}
+	}
 
 	fmt.Println("Success")
 }
 
-func checkFileData(
+func getOneDriveChildFolder(
 	ctx context.Context,
 	client *msgraphsdk.GraphServiceClient,
-	driveID,
-	restoreFolderID string,
+	driveID, itemID, parentName string,
 	fileSizes map[string]int64,
-	folderPermission map[string]map[string][]string,
+	folderPermission map[string][]permissionInfo,
+	startTime time.Time,
+) {
+	response, err := client.DrivesById(driveID).ItemsById(itemID).Children().Get(ctx, nil)
+	if err != nil {
+		fatal(ctx, "getting child folder", err)
+	}
+
+	for _, driveItem := range response.GetValue() {
+		var (
+			itemID   = ptr.Val(driveItem.GetId())
+			itemName = ptr.Val(driveItem.GetName())
+			fullName = parentName + "/" + itemName
+		)
+
+		folderTime, hasTime := mustGetTimeFromName(ctx, itemName)
+		if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) {
+			continue
+		}
+
+		// if it's a file check the size
+		if driveItem.GetFile() != nil {
+			fileSizes[fullName] = ptr.Val(driveItem.GetSize())
+		}
+
+		
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil { + continue + } + + // currently we don't restore blank folders. + // skip permission check for empty folders + if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 { + logger.Ctx(ctx).Info("skipped empty folder: ", fullName) + fmt.Println("skipped empty folder: ", fullName) + + continue + } + + permissionIn(ctx, client, driveID, itemID, fullName, folderPermission) + getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime) + } +} + +func permissionIn( + ctx context.Context, + client *msgraphsdk.GraphServiceClient, + driveID, itemID, folderName string, + permMap map[string][]permissionInfo, +) { + permMap[folderName] = []permissionInfo{} + + pcr, err := client. + DrivesById(driveID). + ItemsById(itemID). + Permissions(). + Get(ctx, nil) + if err != nil { + fatal(ctx, "getting permission", err) + } + + for _, perm := range pcr.GetValue() { + if perm.GetGrantedToV2() == nil { + continue + } + + var ( + gv2 = perm.GetGrantedToV2() + perInfo = permissionInfo{} + ) + + if gv2.GetUser() != nil { + perInfo.entityID = ptr.Val(gv2.GetUser().GetId()) + } else if gv2.GetGroup() != nil { + perInfo.entityID = ptr.Val(gv2.GetGroup().GetId()) + } + + perInfo.roles = perm.GetRoles() + + slices.Sort(perInfo.roles) + + permMap[folderName] = append(permMap[folderName], perInfo) + } +} + +func getRestoreData( + ctx context.Context, + client *msgraphsdk.GraphServiceClient, + driveID, restoreFolderID string, + restoreFile map[string]int64, + restoreFolder map[string][]permissionInfo, + startTime time.Time, ) { restored, err := client. DrivesById(driveID). 
@@ -356,14 +531,7 @@ func checkFileData( ) if item.GetFile() != nil { - if itemSize != fileSizes[itemName] { - fmt.Println("File size does not match:") - fmt.Println("* expected:", fileSizes[itemName]) - fmt.Println("* actual:", itemSize) - fmt.Println("Item:", itemName, itemID) - os.Exit(1) - } - + restoreFile[itemName] = itemSize continue } @@ -371,23 +539,8 @@ func checkFileData( continue } - var ( - expectItem = folderPermission[itemID] - results = permissionsIn(ctx, client, driveID, itemID, nil) - ) - - for pid, result := range results { - expect := expectItem[pid] - - if !slices.Equal(expect, result) { - fmt.Println("permissions are not equal") - fmt.Println("* expected: ", expect) - fmt.Println("* actual: ", result) - fmt.Println("Item:", itemName, itemID) - fmt.Println("Permission:", pid) - os.Exit(1) - } - } + permissionIn(ctx, client, driveID, itemID, itemName, restoreFolder) + getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime) } } @@ -401,41 +554,6 @@ func fatal(ctx context.Context, msg string, err error) { os.Exit(1) } -func permissionsIn( - ctx context.Context, - client *msgraphsdk.GraphServiceClient, - driveID, itemID string, - init map[string][]string, -) map[string][]string { - result := map[string][]string{} - - pcr, err := client. - DrivesById(driveID). - ItemsById(itemID). - Permissions(). 
- Get(ctx, nil) - if err != nil { - fatal(ctx, "getting permission", err) - } - - if len(init) > 0 { - maps.Copy(result, init) - } - - for _, p := range pcr.GetValue() { - var ( - pid = ptr.Val(p.GetId()) - roles = p.GetRoles() - ) - - slices.Sort(roles) - - result[pid] = roles - } - - return result -} - func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) { t, err := common.ExtractTime(name) if err != nil && !errors.Is(err, common.ErrNoTimeString) { @@ -445,17 +563,15 @@ func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) { return t, !errors.Is(err, common.ErrNoTimeString) } -func isWithinTimeBound(ctx context.Context, bound, check time.Time, skip bool) bool { - if skip { - return true - } +func isWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool { + if hasTime { + if bound.Before(check) { + logger.Ctx(ctx). + With("boundary_time", bound, "check_time", check). + Info("skipping restore folder: not older than time bound") - if bound.Before(check) { - logger.Ctx(ctx). - With("boundary_time", bound, "check_time", check). 
- Info("skipping restore folder: not older than time bound") - - return false + return false + } } return true diff --git a/src/internal/common/time.go b/src/internal/common/time.go index 9a39a2a02..23db15b77 100644 --- a/src/internal/common/time.go +++ b/src/internal/common/time.go @@ -53,6 +53,7 @@ var ( dateOnlyRE = regexp.MustCompile(`.*(\d{4}-\d{2}-\d{2}).*`) legacyTimeRE = regexp.MustCompile( `.*(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}?([Zz]|[a-zA-Z]{2}|([\+|\-]([01]\d|2[0-3])))).*`) + simpleTimeTestingRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}.\d{6}).*`) simpleDateTimeRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}:\d{2}:\d{2}).*`) simpleDateTimeOneDriveRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}).*`) standardTimeRE = regexp.MustCompile( @@ -65,6 +66,7 @@ var ( // get eagerly chosen as the parsable format, slicing out some data. formats = []TimeFormat{ StandardTime, + SimpleTimeTesting, SimpleDateTime, SimpleDateTimeOneDrive, LegacyTime, @@ -75,6 +77,7 @@ var ( } regexes = []*regexp.Regexp{ standardTimeRE, + simpleTimeTestingRE, simpleDateTimeRE, simpleDateTimeOneDriveRE, legacyTimeRE, diff --git a/src/internal/tester/resource_owners.go b/src/internal/tester/resource_owners.go index c36386b96..b5a1625a0 100644 --- a/src/internal/tester/resource_owners.go +++ b/src/internal/tester/resource_owners.go @@ -1,12 +1,15 @@ package tester import ( + "context" "os" "strings" "testing" "github.com/alcionai/clues" "github.com/stretchr/testify/require" + + "github.com/alcionai/corso/src/pkg/logger" ) // M365TenantID returns a tenantID string representing the azureTenantID described @@ -15,7 +18,20 @@ import ( // last-attempt fallback that will only work on alcion's testing org. 
func M365TenantID(t *testing.T) string {
 	cfg, err := readTestConfig()
-	require.NoError(t, err, "retrieving m365 user id from test configuration", clues.ToCore(err))
+	require.NoError(t, err, "retrieving m365 tenant ID from test configuration", clues.ToCore(err))
+
+	return cfg[TestCfgAzureTenantID]
+}
+
+// GetM365TenantID returns a tenantID string representing the azureTenantID described
+// by either the env var AZURE_TENANT_ID, the corso_test.toml config
+// file or the default value (in that order of priority). The default is a
+// last-attempt fallback that will only work on alcion's testing org.
+func GetM365TenantID(ctx context.Context) string {
+	cfg, err := readTestConfig()
+	if err != nil {
+		logger.Ctx(ctx).Error(err, "retrieving m365 tenant ID from test configuration")
+	}
 
 	return cfg[TestCfgAzureTenantID]
 }
@@ -31,6 +47,19 @@ func M365UserID(t *testing.T) string {
 	return cfg[TestCfgUserID]
 }
 
+// GetM365UserID returns an userID string representing the m365UserID described
+// by either the env var CORSO_M365_TEST_USER_ID, the corso_test.toml config
+// file or the default value (in that order of priority). The default is a
+// last-attempt fallback that will only work on alcion's testing org.
+func GetM365UserID(ctx context.Context) string {
+	cfg, err := readTestConfig()
+	if err != nil {
+		logger.Ctx(ctx).Error(err, "retrieving m365 user id from test configuration")
+	}
+
+	return cfg[TestCfgUserID]
+}
+
 // SecondaryM365UserID returns an userID string representing the m365UserID
 // described by either the env var CORSO_SECONDARY_M365_TEST_USER_ID, the
 // corso_test.toml config file or the default value (in that order of priority).