diff --git a/.github/workflows/load_test.yml b/.github/workflows/load_test.yml
index 9baea5775..c75b29db2 100644
--- a/.github/workflows/load_test.yml
+++ b/.github/workflows/load_test.yml
@@ -137,7 +137,7 @@ jobs:
     strategy:
       matrix:
         user: ${{ fromJson(needs.setup.outputs.matrix).user }}
-        folder: [Corso_Restore_,'']
+        folder: [Corso_Restore_, '']
     steps:
       - uses: actions/checkout@v3
       - name: Set folder boundary datetime
diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml
index 951876ab5..2bce1ca57 100644
--- a/.github/workflows/sanity-test.yaml
+++ b/.github/workflows/sanity-test.yaml
@@ -18,21 +18,21 @@ concurrency:
   group: sanity_testing-${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
-
 jobs:
   Sanity-Tests:
     environment: Testing
     runs-on: ubuntu-latest
     env:
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY_SECRET }}
       AZURE_CLIENT_ID: ${{ secrets.CLIENT_ID }}
       AZURE_CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }}
       AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
+      CORSO_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }}
+      CORSO_LOG_FILE: ./src/testlog/testlogging.log
       CORSO_M365_TEST_USER_ID: ${{ github.event.inputs.user != '' && github.event.inputs.user || secrets.CORSO_M365_TEST_USER_ID }}
       CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
       TEST_RESULT: "test_results"
-      CORSO_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }}
-      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY_SECRET }}

     defaults:
       run:
@@ -76,8 +76,8 @@ jobs:
           if ! grep -q 'Initialized a S3 repository within bucket' $TEST_RESULT/initrepo.txt
           then
-          echo "repo could not be initiated"
-          exit 1
+            echo "repo could not be initiated"
+            exit 1
           fi

           echo result="$prefix" >> $GITHUB_OUTPUT
@@ -93,8 +93,8 @@ jobs:
           if ! grep -q 'Connected to S3 bucket' $TEST_RESULT/connect.txt
           then
-          echo "repo could not be connected"
-          exit 1
+            echo "repo could not be connected"
+            exit 1
           fi

      # run the tests
@@ -103,29 +103,47 @@ jobs:
         run: |
           ./corso backup create exchange \
             --user "${CORSO_M365_TEST_USER_ID}" \
-            --hide-progress --json 2>&1 | tee $TEST_RESULT/backup_exchange.txt
+            --hide-progress \
+            --json \
+            2>&1 | tee $TEST_RESULT/backup_exchange.txt

           resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_exchange.txt )
           if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then
-            echo "backup was not successfull"
-            exit 1
+            echo "backup was not successful"
+            exit 1
           fi

           data=$( echo $resultjson | jq -r '.[0] | .id' )
           echo result=$data >> $GITHUB_OUTPUT

-      # list the backup exhange
+      # list all exchange backups
       - name: Backup exchange list test
         run: |
           set -euo pipefail
           ./corso backup list exchange \
-            --hide-progress 2>&1 | tee $TEST_RESULT/backup_exchange_list.txt
+            --hide-progress \
+            2>&1 | tee $TEST_RESULT/backup_exchange_list.txt

           if ! grep -q ${{ steps.exchange-test.outputs.result }} $TEST_RESULT/backup_exchange_list.txt
           then
-            echo "listing of backup was not successfull"
-            exit 1
+            echo "listing of backup was not successful"
+            exit 1
+          fi
+
+      # list the previous exchange backup
+      - name: Backup exchange list single backup test
+        run: |
+          set -euo pipefail
+          ./corso backup list exchange \
+            --hide-progress \
+            --backup "${{ steps.exchange-test.outputs.result }}" \
+            2>&1 | tee $TEST_RESULT/backup_exchange_list_single.txt
+
+          if ! grep -q ${{ steps.exchange-test.outputs.result }} $TEST_RESULT/backup_exchange_list_single.txt
+          then
+            echo "listing of backup was not successful"
+            exit 1
           fi

      # test exchange restore
@@ -135,31 +153,34 @@ jobs:
           set -euo pipefail
           ./corso restore exchange \
             --hide-progress \
-            --backup "${{ steps.exchange-test.outputs.result }}" 2>&1 | tee $TEST_RESULT/exchange-restore-test.txt
+            --backup "${{ steps.exchange-test.outputs.result }}" \
+            2>&1 | tee $TEST_RESULT/exchange-restore-test.txt

           echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/exchange-restore-test.txt | sed "s/Restoring to folder//" ) >> $GITHUB_OUTPUT

       - name: Restoration check
         env:
-          RESTORE_FOLDER: ${{ steps.exchange-restore-test.outputs.result }}
-          RESTORE_SERVICE: "exchange"
+          SANITY_RESTORE_FOLDER: ${{ steps.exchange-restore-test.outputs.result }}
+          SANITY_RESTORE_SERVICE: "exchange"
         run: |
           set -euo pipefail
           ./sanityCheck

-      # test incremental backup exhange
+      # test incremental backup exchange
       - name: Backup exchange incremental
         id: exchange-incremental-test
         run: |
           set -euo pipefail
           ./corso backup create exchange \
+            --hide-progress \
             --user "${CORSO_M365_TEST_USER_ID}" \
-            --hide-progress --json 2>&1 | tee $TEST_RESULT/backup_exchange_incremental.txt
+            --json \
+            2>&1 | tee $TEST_RESULT/backup_exchange_incremental.txt

           resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_exchange_incremental.txt )
           if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then
-            echo "backup was not successfull"
-            exit 1
+            echo "backup was not successful"
+            exit 1
           fi

           echo result=$( echo $resultjson | jq -r '.[0] | .id' ) >> $GITHUB_OUTPUT
@@ -171,13 +192,14 @@ jobs:
           set -euo pipefail
           ./corso restore exchange \
             --hide-progress \
-            --backup "${{ steps.exchange-incremental-test.outputs.result }}" 2>&1 | tee $TEST_RESULT/exchange-incremantal-restore-test.txt
+            --backup "${{ steps.exchange-incremental-test.outputs.result }}" \
+            2>&1 | tee $TEST_RESULT/exchange-incremantal-restore-test.txt

           echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/exchange-incremantal-restore-test.txt | sed "s/Restoring to folder//" ) >> $GITHUB_OUTPUT

       - name: Restoration check
         env:
-          RESTORE_FOLDER: ${{ steps.exchange-incremantal-restore-test.outputs.result }}
-          RESTORE_SERVICE: "exchange"
+          SANITY_RESTORE_FOLDER: ${{ steps.exchange-incremantal-restore-test.outputs.result }}
+          SANITY_RESTORE_SERVICE: "exchange"
         run: |
           set -euo pipefail
           ./sanityCheck
@@ -191,30 +213,48 @@ jobs:
         run: |
           set -euo pipefail
           ./corso backup create onedrive \
+            --hide-progress \
             --user "${CORSO_M365_TEST_USER_ID}" \
-            --hide-progress --json 2>&1 | tee $TEST_RESULT/backup_onedrive.txt
+            --json \
+            2>&1 | tee $TEST_RESULT/backup_onedrive.txt

           resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_onedrive.txt )
           if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then
-            echo "backup was not successfull"
-            exit 1
+            echo "backup was not successful"
+            exit 1
           fi

           data=$( echo $resultjson | jq -r '.[0] | .id' )
           echo result=$data >> $GITHUB_OUTPUT

-      # list the bakcup onedrive
+      # list all onedrive backups
       - name: Backup onedrive list test
         run: |
           set -euo pipefail
           ./corso backup list onedrive \
-            --hide-progress 2>&1 | tee $TEST_RESULT/backup_onedrive_list.txt
+            --hide-progress \
+            2>&1 | tee $TEST_RESULT/backup_onedrive_list.txt

           if ! grep -q ${{ steps.onedrive-test.outputs.result }} $TEST_RESULT/backup_onedrive_list.txt
           then
-            echo "listing of backup was not successfull"
-            exit 1
+            echo "listing of backup was not successful"
+            exit 1
+          fi
+
+      # list the previous onedrive backup
+      - name: Backup onedrive list single backup test
+        run: |
+          set -euo pipefail
+          ./corso backup list onedrive \
+            --hide-progress \
+            --backup "${{ steps.onedrive-test.outputs.result }}" \
+            2>&1 | tee $TEST_RESULT/backup_onedrive_list_single.txt
+
+          if ! grep -q ${{ steps.onedrive-test.outputs.result }} $TEST_RESULT/backup_onedrive_list_single.txt
+          then
+            echo "listing of backup was not successful"
+            exit 1
           fi

      # test onedrive restore
@@ -222,12 +262,16 @@ jobs:
         id: onedrive-restore-test
         run: |
           set -euo pipefail
-          ./corso restore onedrive --backup "${{ steps.onedrive-test.outputs.result }}" --hide-progress 2>&1 | tee $TEST_RESULT/onedrive-restore-test.txt
+          ./corso restore onedrive \
+            --hide-progress \
+            --backup "${{ steps.onedrive-test.outputs.result }}" \
+            2>&1 | tee $TEST_RESULT/onedrive-restore-test.txt

           echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/onedrive-restore-test.txt | sed "s/Restoring to folder//") >> $GITHUB_OUTPUT

       - name: Restoration oneDrive check
         env:
-          RESTORE_FOLDER: ${{ steps.onedrive-restore-test.outputs.result }}
+          SANITY_RESTORE_FOLDER: ${{ steps.onedrive-restore-test.outputs.result }}
+          SANITY_RESTORE_SERVICE: "onedrive"
         run: |
           set -euo pipefail
           ./sanityCheck
@@ -238,14 +282,16 @@ jobs:
         run: |
           set -euo pipefail
           ./corso backup create onedrive \
+            --hide-progress \
             --user "${CORSO_M365_TEST_USER_ID}"\
-            --hide-progress --json 2>&1 | tee $TEST_RESULT/backup_onedrive_incremental.txt
+            --json \
+            2>&1 | tee $TEST_RESULT/backup_onedrive_incremental.txt

           resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_onedrive_incremental.txt )
           if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then
-            echo "backup was not successfull"
-            exit 1
+            echo "backup was not successful"
+            exit 1
           fi

           data=$( echo $resultjson | jq -r '.[0] | .id' )
@@ -256,13 +302,27 @@ jobs:
         id: onedrive-incremental-restore-test
         run: |
           set -euo pipefail
-          ./corso restore onedrive --backup "${{ steps.onedrive-incremental-test.outputs.result }}" --hide-progress 2>&1 | tee $TEST_RESULT/onedrive-incremental-restore-test.txt
+          ./corso restore onedrive \
+            --hide-progress \
+            --backup "${{ steps.onedrive-incremental-test.outputs.result }}" \
+            2>&1 | tee $TEST_RESULT/onedrive-incremental-restore-test.txt

           echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/onedrive-incremental-restore-test.txt | sed "s/Restoring to folder//") >> $GITHUB_OUTPUT

       - name: Restoration oneDrive check
         env:
-          RESTORE_FOLDER: ${{ steps.onedrive-incremental-restore-test.outputs.result }}
+          SANITY_RESTORE_FOLDER: ${{ steps.onedrive-incremental-restore-test.outputs.result }}
+          SANITY_RESTORE_SERVICE: "onedrive"
         run: |
           set -euo pipefail
           ./sanityCheck
+
+      # Upload the test log as an artifact for later review.
+      - name: Upload test log
+        if: failure()
+        uses: actions/upload-artifact@v3
+        with:
+          name: test-log
+          path: src/testlog/*
+          if-no-files-found: error
+          retention-days: 14
\ No newline at end of file
diff --git a/src/cmd/sanity_test/sanity_tests.go b/src/cmd/sanity_test/sanity_tests.go
index 65fbb9307..82f0f1002 100644
--- a/src/cmd/sanity_test/sanity_tests.go
+++ b/src/cmd/sanity_test/sanity_tests.go
@@ -2,99 +2,112 @@ package main

 import (
     "context"
+    "errors"
     "fmt"
     "os"
-    "reflect"
+    "path"
     "strings"
     "time"

-    "github.com/alcionai/corso/src/internal/common/ptr"
-    "github.com/alcionai/corso/src/internal/connector/graph"
+    "github.com/alcionai/clues"
     msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
     "github.com/microsoftgraph/msgraph-sdk-go/models"
     "github.com/microsoftgraph/msgraph-sdk-go/users"
+    "golang.org/x/exp/maps"
+    "golang.org/x/exp/slices"
+
+    "github.com/alcionai/corso/src/internal/common"
+    "github.com/alcionai/corso/src/internal/common/ptr"
+    "github.com/alcionai/corso/src/internal/connector/graph"
+    "github.com/alcionai/corso/src/pkg/logger"
 )

 func main() {
+    ctx, log := logger.Seed(context.Background(), "info", logger.GetLogFile(""))
+    defer func() {
+        _ = log.Sync() // flush all logs in the buffer
+    }()
+
     adapter, err := graph.CreateAdapter(
         os.Getenv("AZURE_TENANT_ID"),
         os.Getenv("AZURE_CLIENT_ID"),
         os.Getenv("AZURE_CLIENT_SECRET"))
     if err != nil {
-        fmt.Println("error while creating adapter: ", err)
-        os.Exit(1)
-
-        return
+        fatal(ctx, "creating adapter", err)
     }

-    testUser := os.Getenv("CORSO_M365_TEST_USER_ID")
-    folder := strings.TrimSpace(os.Getenv("RESTORE_FOLDER"))
+    var (
+        client       = msgraphsdk.NewGraphServiceClient(adapter)
+        testUser     = os.Getenv("CORSO_M365_TEST_USER_ID")
+        testService  = os.Getenv("SANITY_RESTORE_SERVICE")
+        folder       = strings.TrimSpace(os.Getenv("SANITY_RESTORE_FOLDER"))
+        startTime, _ = mustGetTimeFromName(ctx, folder)
+    )

-    restoreStartTime := strings.SplitAfter(folder, "Corso_Restore_")[1]
-    startTime, _ := time.Parse(time.RFC822, restoreStartTime)
+    ctx = clues.Add(
+        ctx,
+        "resource_owner", testUser,
+        "service", testService,
+        "sanity_restore_folder", folder,
+        "start_time", startTime.Format(time.RFC3339Nano))

-    fmt.Println("Restore folder: ", folder)
+    logger.Ctx(ctx).Info("starting sanity test check")

-    client := msgraphsdk.NewGraphServiceClient(adapter)
-
-    switch service := os.Getenv("RESTORE_SERVICE"); service {
+    switch testService {
     case "exchange":
-        checkEmailRestoration(client, testUser, folder, startTime)
+        checkEmailRestoration(ctx, client, testUser, folder, startTime)
+    case "onedrive":
+        checkOnedriveRestoration(ctx, client, testUser, folder, startTime)
     default:
-        checkOnedriveRestoration(client, testUser, folder, startTime)
+        fatal(ctx, "no service specified", nil)
     }
 }

 // checkEmailRestoration verifies that the emails count in restored folder is equivalent to
 // emails in actual m365 account
 func checkEmailRestoration(
+    ctx context.Context,
     client *msgraphsdk.GraphServiceClient,
-    testUser,
-    folderName string,
+    testUser, folderName string,
     startTime time.Time,
 ) {
     var (
-        messageCount  = make(map[string]int32)
+        itemCount     = make(map[string]int32)
         restoreFolder models.MailFolderable
+        builder       = client.UsersById(testUser).MailFolders()
     )

-    user := client.UsersById(testUser)
-    builder := user.MailFolders()
-
     for {
-        result, err := builder.Get(context.Background(), nil)
+        result, err := builder.Get(ctx, nil)
         if err != nil {
-            fmt.Printf("Error getting the drive: %v\n", err)
-            os.Exit(1)
+            fatal(ctx, "getting mail folders", err)
         }

-        res := result.GetValue()
+        values := result.GetValue()

-        for _, r := range res {
-            name, ok := ptr.ValOK(r.GetDisplayName())
-            if !ok {
+        // recursive restore folder discovery before proceeding with tests
+        for _, v := range values {
+            var (
+                itemID              = ptr.Val(v.GetId())
+                itemName            = ptr.Val(v.GetDisplayName())
+                ictx                = clues.Add(ctx, "item_id", itemID, "item_name", itemName)
+                folderTime, hasTime = mustGetTimeFromName(ctx, itemName)
+            )
+
+            if !isWithinTimeBound(ictx, startTime, folderTime, hasTime) {
                 continue
             }

-            var rStartTime time.Time
-
-            restoreStartTime := strings.SplitAfter(name, "Corso_Restore_")
-            if len(restoreStartTime) > 1 {
-                rStartTime, _ = time.Parse(time.RFC822, restoreStartTime[1])
-                if startTime.Before(rStartTime) {
-                    fmt.Printf("The restore folder %s was created after %s. Will skip check.", name, folderName)
-                    continue
-                }
-            }
-
-            if name == folderName {
-                restoreFolder = r
+            // if we found the folder to test against, back out of this loop.
+            if itemName == folderName {
+                restoreFolder = v
                 continue
             }

-            getAllSubFolder(client, testUser, r, name, messageCount)
+            // otherwise, recursively aggregate all child folders.
+            getAllSubFolder(ctx, client, testUser, v, itemName, itemCount)

-            messageCount[name], _ = ptr.ValOK(r.GetTotalItemCount())
+            itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
         }

         link, ok := ptr.ValOK(result.GetOdataNextLink())
@@ -105,86 +118,93 @@ func checkEmailRestoration(
         builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
     }

-    folderID, ok := ptr.ValOK(restoreFolder.GetId())
-    if !ok {
-        fmt.Printf("can't find ID of restore folder")
-        os.Exit(1)
-    }
+    folderID := ptr.Val(restoreFolder.GetId())
+    folderName = ptr.Val(restoreFolder.GetDisplayName())
+    ctx = clues.Add(
+        ctx,
+        "restore_folder_id", folderID,
+        "restore_folder_name", folderName)

-    folder := user.MailFoldersById(folderID)
-
-    childFolder, err := folder.ChildFolders().Get(context.Background(), nil)
+    childFolder, err := client.
+        UsersById(testUser).
+        MailFoldersById(folderID).
+        ChildFolders().
+        Get(ctx, nil)
     if err != nil {
-        fmt.Printf("Error getting the drive: %v\n", err)
-        os.Exit(1)
+        fatal(ctx, "getting restore folder child folders", err)
     }

-    for _, restore := range childFolder.GetValue() {
-        restoreDisplayName, ok := ptr.ValOK(restore.GetDisplayName())
-        if !ok {
-            continue
-        }
+    for _, fld := range childFolder.GetValue() {
+        var (
+            fldID   = ptr.Val(fld.GetId())
+            fldName = ptr.Val(fld.GetDisplayName())
+            count   = ptr.Val(fld.GetTotalItemCount())
+            ictx    = clues.Add(
+                ctx,
+                "child_folder_id", fldID,
+                "child_folder_name", fldName,
+                "expected_count", itemCount[fldName],
+                "actual_count", count)
+        )

-        restoreItemCount, _ := ptr.ValOK(restore.GetTotalItemCount())
-
-        if messageCount[restoreDisplayName] != restoreItemCount {
-            fmt.Println("Restore was not succesfull for: ", restoreDisplayName,
-                "Folder count: ", messageCount[restoreDisplayName],
-                "Restore count: ", restoreItemCount)
+        if itemCount[fldName] != count {
+            logger.Ctx(ictx).Error("test failure: Restore item counts do not match")
+            fmt.Println("Restore item counts do not match:")
+            fmt.Println("* expected:", itemCount[fldName])
+            fmt.Println("* actual:", count)
+            fmt.Println("Folder:", fldName, ptr.Val(fld.GetId()))
             os.Exit(1)
         }

-        checkAllSubFolder(client, testUser, restore, restoreDisplayName, messageCount)
+        checkAllSubFolder(ctx, client, testUser, fld, fldName, itemCount)
     }
 }

 // getAllSubFolder will recursively check for all subfolders and get the corresponding
 // email count.
 func getAllSubFolder(
+    ctx context.Context,
     client *msgraphsdk.GraphServiceClient,
     testUser string,
     r models.MailFolderable,
     parentFolder string,
     messageCount map[string]int32,
 ) {
-    folderID, ok := ptr.ValOK(r.GetId())
-
-    if !ok {
-        fmt.Println("unable to get sub folder ID")
-        return
-    }
-
-    user := client.UsersById(testUser)
-    folder := user.MailFoldersById(folderID)
-
-    var count int32 = 99
-
-    childFolder, err := folder.ChildFolders().Get(
-        context.Background(),
-        &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
+    var (
+        folderID       = ptr.Val(r.GetId())
+        count    int32 = 99
+        options        = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
             QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
                 Top: &count,
             },
-        })
+        }
+    )
+
+    ctx = clues.Add(ctx, "parent_folder_id", folderID)
+
+    childFolder, err := client.
+        UsersById(testUser).
+        MailFoldersById(folderID).
+        ChildFolders().
+        Get(ctx, options)
     if err != nil {
-        fmt.Printf("Error getting the drive: %v\n", err)
-        os.Exit(1)
+        fatal(ctx, "getting mail subfolders", err)
     }

     for _, child := range childFolder.GetValue() {
-        childDisplayName, _ := ptr.ValOK(child.GetDisplayName())
-
-        fullFolderName := parentFolder + "/" + childDisplayName
+        var (
+            childDisplayName = ptr.Val(child.GetDisplayName())
+            childFolderCount = ptr.Val(child.GetChildFolderCount())
+            fullFolderName   = parentFolder + "/" + childDisplayName
+        )

         messageCount[fullFolderName], _ = ptr.ValOK(child.GetTotalItemCount())

-        childFolderCount, _ := ptr.ValOK(child.GetChildFolderCount())
-
         // recursively check for subfolders
         if childFolderCount > 0 {
             parentFolder := fullFolderName
-            getAllSubFolder(client, testUser, child, parentFolder, messageCount)
+            getAllSubFolder(ctx, client, testUser, child, parentFolder, messageCount)
         }
     }
 }
@@ -192,173 +212,251 @@
 // checkAllSubFolder will recursively traverse inside the restore folder and
 // verify that data matched in all subfolders
 func checkAllSubFolder(
+    ctx context.Context,
     client *msgraphsdk.GraphServiceClient,
     testUser string,
     r models.MailFolderable,
     parentFolder string,
     messageCount map[string]int32,
 ) {
-    folderID, ok := ptr.ValOK(r.GetId())
-
-    if !ok {
-        fmt.Println("unable to get sub folder ID")
-        return
-    }
-
-    user := client.UsersById(testUser)
-    folder := user.MailFoldersById(folderID)
-
-    var count int32 = 99
-
-    childFolder, err := folder.ChildFolders().Get(
-        context.Background(),
-        &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
+    var (
+        folderID       = ptr.Val(r.GetId())
+        count    int32 = 99
+        options        = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
             QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
                 Top: &count,
             },
-        })
+        }
+    )
+
+    childFolder, err := client.
+        UsersById(testUser).
+        MailFoldersById(folderID).
+        ChildFolders().
+        Get(ctx, options)
     if err != nil {
-        fmt.Printf("Error getting the drive: %v\n", err)
-        os.Exit(1)
+        fatal(ctx, "getting mail subfolders", err)
     }

     for _, child := range childFolder.GetValue() {
-        childDisplayName, _ := ptr.ValOK(child.GetDisplayName())
-
-        fullFolderName := parentFolder + "/" + childDisplayName
-
-        childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
+        var (
+            childDisplayName = ptr.Val(child.GetDisplayName())
+            childTotalCount  = ptr.Val(child.GetTotalItemCount())
+            //nolint:forbidigo
+            fullFolderName = path.Join(parentFolder, childDisplayName)
+        )

         if messageCount[fullFolderName] != childTotalCount {
-            fmt.Println("Restore was not succesfull for: ", fullFolderName,
-                "Folder count: ", messageCount[fullFolderName],
-                "Restore count: ", childTotalCount)
+            fmt.Println("Message count doesn't match:")
+            fmt.Println("* expected:", messageCount[fullFolderName])
+            fmt.Println("* actual:", childTotalCount)
+            fmt.Println("Item:", fullFolderName, folderID)
             os.Exit(1)
         }

-        childFolderCount, _ := ptr.ValOK(child.GetChildFolderCount())
+        childFolderCount := ptr.Val(child.GetChildFolderCount())

         if childFolderCount > 0 {
-            parentFolder := fullFolderName
-
-            checkAllSubFolder(client, testUser, child, parentFolder, messageCount)
+            checkAllSubFolder(ctx, client, testUser, child, fullFolderName, messageCount)
         }
     }
 }

-func checkOnedriveRestoration(client *msgraphsdk.GraphServiceClient, testUser, folderName string, startTime time.Time) {
-    file := make(map[string]int64)
-    folderPermission := make(map[string][]string)
-    restoreFolderID := ""
+func checkOnedriveRestoration(
+    ctx context.Context,
+    client *msgraphsdk.GraphServiceClient,
+    testUser, folderName string,
+    startTime time.Time,
+) {
+    var (
+        // map itemID -> item size
+        fileSizes = make(map[string]int64)
+        // map itemID -> permission id -> []permission roles
+        folderPermission = make(map[string]map[string][]string)
+    )

-    drive, err := client.UsersById(testUser).Drive().Get(context.Background(), nil)
+    drive, err := client.
+        UsersById(testUser).
+        Drive().
+        Get(ctx, nil)
     if err != nil {
-        fmt.Printf("Error getting the drive: %v\n", err)
-        os.Exit(1)
+        fatal(ctx, "getting the drive", err)
     }

-    response, err := client.DrivesById(*drive.GetId()).Root().Children().Get(context.Background(), nil)
+    var (
+        driveID         = ptr.Val(drive.GetId())
+        driveName       = ptr.Val(drive.GetName())
+        restoreFolderID string
+    )
+
+    ctx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
+
+    response, err := client.
+        DrivesById(driveID).
+        Root().
+        Children().
+        Get(ctx, nil)
     if err != nil {
-        fmt.Printf("Error getting drive by id: %v\n", err)
-        os.Exit(1)
+        fatal(ctx, "getting drive by id", err)
     }

     for _, driveItem := range response.GetValue() {
-        if *driveItem.GetName() == folderName {
-            restoreFolderID = *driveItem.GetId()
+        var (
+            itemID   = ptr.Val(driveItem.GetId())
+            itemName = ptr.Val(driveItem.GetName())
+            ictx     = clues.Add(ctx, "item_id", itemID, "item_name", itemName)
+        )
+
+        if itemName == folderName {
+            restoreFolderID = itemID
             continue
         }

-        var rStartTime time.Time
+        folderTime, hasTime := mustGetTimeFromName(ictx, itemName)

-        restoreStartTime := strings.SplitAfter(*driveItem.GetName(), "Corso_Restore_")
-        if len(restoreStartTime) > 1 {
-            rStartTime, _ = time.Parse(time.RFC822, restoreStartTime[1])
-            if startTime.Before(rStartTime) {
-                fmt.Printf("The restore folder %s was created after %s. Will skip check.", *driveItem.GetName(), folderName)
-                continue
-            }
+        if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) {
+            continue
         }

         // if it's a file check the size
         if driveItem.GetFile() != nil {
-            file[*driveItem.GetName()] = *driveItem.GetSize()
+            fileSizes[itemName] = ptr.Val(driveItem.GetSize())
         }

-        if driveItem.GetFolder() != nil {
-            permission, err := client.
-                DrivesById(*drive.GetId()).
-                ItemsById(*driveItem.GetId()).
-                Permissions().
-                Get(context.TODO(), nil)
-            if err != nil {
-                fmt.Printf("Error getting item by id: %v\n", err)
-                os.Exit(1)
-            }
-
-            // check if permission are correct on folder
-            for _, permission := range permission.GetValue() {
-                folderPermission[*driveItem.GetName()] = permission.GetRoles()
-            }
-
-            continue
-        }
+        folderPermission[itemID] = permissionsIn(ctx, client, driveID, itemID, folderPermission[itemID])
     }

-    checkFileData(client, *drive.GetId(), restoreFolderID, file, folderPermission)
+    checkFileData(ctx, client, driveID, restoreFolderID, fileSizes, folderPermission)

     fmt.Println("Success")
 }

 func checkFileData(
+    ctx context.Context,
     client *msgraphsdk.GraphServiceClient,
     driveID, restoreFolderID string,
-    file map[string]int64,
-    folderPermission map[string][]string,
+    fileSizes map[string]int64,
+    folderPermission map[string]map[string][]string,
 ) {
-    itemBuilder := client.DrivesById(driveID).ItemsById(restoreFolderID)
-
-    restoreResponses, err := itemBuilder.Children().Get(context.Background(), nil)
+    restored, err := client.
+        DrivesById(driveID).
+        ItemsById(restoreFolderID).
+        Children().
+        Get(ctx, nil)
     if err != nil {
-        fmt.Printf("Error getting child folder: %v\n", err)
-        os.Exit(1)
+        fatal(ctx, "getting child folder", err)
     }

-    for _, restoreData := range restoreResponses.GetValue() {
-        restoreName := *restoreData.GetName()
+    for _, item := range restored.GetValue() {
+        var (
+            itemID   = ptr.Val(item.GetId())
+            itemName = ptr.Val(item.GetName())
+            itemSize = ptr.Val(item.GetSize())
+        )

-        if restoreData.GetFile() != nil {
-            if *restoreData.GetSize() != file[restoreName] {
-                fmt.Printf("Size of file %s is different in drive %d and restored file: %d ",
-                    restoreName,
-                    file[restoreName],
-                    *restoreData.GetSize())
+        if item.GetFile() != nil {
+            if itemSize != fileSizes[itemName] {
+                fmt.Println("File size does not match:")
+                fmt.Println("* expected:", fileSizes[itemName])
+                fmt.Println("* actual:", itemSize)
+                fmt.Println("Item:", itemName, itemID)
                 os.Exit(1)
             }

             continue
         }

-        itemBuilder := client.DrivesById(driveID).ItemsById(*restoreData.GetId())
+        if item.GetFolder() == nil && item.GetPackage() == nil {
+            continue
+        }

-        if restoreData.GetFolder() != nil {
-            permissionColl, err := itemBuilder.Permissions().Get(context.TODO(), nil)
-            if err != nil {
-                fmt.Printf("Error getting permission: %v\n", err)
-                os.Exit(1)
-            }
+        var (
+            expectItem = folderPermission[itemID]
+            results    = permissionsIn(ctx, client, driveID, itemID, nil)
+        )

-            userPermission := []string{}
+        for pid, result := range results {
+            expect := expectItem[pid]

-            for _, perm := range permissionColl.GetValue() {
-                userPermission = perm.GetRoles()
-            }
-
-            if !reflect.DeepEqual(folderPermission[restoreName], userPermission) {
-                fmt.Printf("Permission mismatch for %s.", restoreName)
+            if !slices.Equal(expect, result) {
+                fmt.Println("permissions are not equal")
+                fmt.Println("* expected: ", expect)
+                fmt.Println("* actual: ", result)
+                fmt.Println("Item:", itemName, itemID)
+                fmt.Println("Permission:", pid)
                 os.Exit(1)
             }
         }
     }
 }
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+func fatal(ctx context.Context, msg string, err error) {
+    logger.CtxErr(ctx, err).Error("test failure: " + msg)
+    fmt.Println(msg+": ", err)
+    os.Exit(1)
+}
+
+func permissionsIn(
+    ctx context.Context,
+    client *msgraphsdk.GraphServiceClient,
+    driveID, itemID string,
+    init map[string][]string,
+) map[string][]string {
+    result := map[string][]string{}
+
+    pcr, err := client.
+        DrivesById(driveID).
+        ItemsById(itemID).
+        Permissions().
+        Get(ctx, nil)
+    if err != nil {
+        fatal(ctx, "getting permission", err)
+    }
+
+    if len(init) > 0 {
+        maps.Copy(result, init)
+    }
+
+    for _, p := range pcr.GetValue() {
+        var (
+            pid   = ptr.Val(p.GetId())
+            roles = p.GetRoles()
+        )
+
+        slices.Sort(roles)
+
+        result[pid] = roles
+    }
+
+    return result
+}
+
+func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
+    t, err := common.ExtractTime(name)
+    if err != nil && !errors.Is(err, common.ErrNoTimeString) {
+        fatal(ctx, "extracting time from name: "+name, err)
+    }
+
+    return t, !errors.Is(err, common.ErrNoTimeString)
+}
+
+func isWithinTimeBound(ctx context.Context, bound, check time.Time, skip bool) bool {
+    if skip {
+        return true
+    }
+
+    if bound.Before(check) {
+        logger.Ctx(ctx).
+            With("boundary_time", bound, "check_time", check).
+            Info("skipping restore folder: not older than time bound")
+
+        return false
+    }
+
+    return true
+}
diff --git a/src/pkg/logger/example_logger_test.go b/src/pkg/logger/example_logger_test.go
index 7b9ef9146..6241f4b99 100644
--- a/src/pkg/logger/example_logger_test.go
+++ b/src/pkg/logger/example_logger_test.go
@@ -14,7 +14,7 @@ import (

 const (
     loglevel = "info"
-    logfile  = "stderr"
+    logfile  = logger.Stderr
     itemID   = "item_id"
 )
diff --git a/src/pkg/logger/logger.go b/src/pkg/logger/logger.go
index 93ee86b13..181d5e070 100644
--- a/src/pkg/logger/logger.go
+++ b/src/pkg/logger/logger.go
@@ -49,6 +49,11 @@ const (
     readableLogsFN = "readable-logs"
 )

+const (
+    Stderr = "stderr"
+    Stdout = "stdout"
+)
+
 // Returns the default location for writing logs
 func defaultLogLocation() string {
     return filepath.Join(userLogsDir, "corso", "logs", time.Now().UTC().Format("2006-01-02T15-04-05Z")+".log")
@@ -104,31 +109,40 @@ func PreloadLoggingFlags() (string, string) {

     // retrieve the user's preferred log file location
     // automatically defaults to default log location
-    logfile, err := fs.GetString(logFileFN)
+    lffv, err := fs.GetString(logFileFN)
     if err != nil {
         return "info", dlf
     }

+    logfile := GetLogFile(lffv)
+
+    return levelString, logfile
+}
+
+// GetLogFile parses the log file. Uses the provided value, if populated,
+// then falls back to the env var, and then defaults to stderr.
+func GetLogFile(logFileFlagVal string) string {
+    r := logFileFlagVal
+
     // if not specified, attempt to fall back to env declaration.
-    if len(logfile) == 0 {
-        logfile = os.Getenv("CORSO_LOG_FILE")
+    if len(r) == 0 {
+        r = os.Getenv("CORSO_LOG_FILE")
     }

-    if logfile == "-" {
-        logfile = "stdout"
+    if r == "-" {
+        r = Stdout
     }

-    if logfile != "stdout" && logfile != "stderr" {
-        LogFile = logfile
-        logdir := filepath.Dir(logfile)
+    if r != Stdout && r != Stderr {
+        logdir := filepath.Dir(r)

         err := os.MkdirAll(logdir, 0o755)
         if err != nil {
-            return "info", "stderr"
+            return Stderr
         }
     }

-    return levelString, logfile
+    return r
 }

 func genLogger(level logLevel, logfile string) (*zapcore.Core, *zap.SugaredLogger) {
@@ -183,7 +197,7 @@ func genLogger(level logLevel, logfile string) (*zapcore.Core, *zap.SugaredLogge
         opts = append(opts, zap.WithCaller(false))
         cfg.EncoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("15:04:05.00")

-        if logfile == "stderr" || logfile == "stdout" {
+        if logfile == Stderr || logfile == Stdout {
             cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
         }
     }