diff --git a/.github/actions/backup-restore-test/action.yml b/.github/actions/backup-restore-test/action.yml new file mode 100644 index 000000000..83bee4c72 --- /dev/null +++ b/.github/actions/backup-restore-test/action.yml @@ -0,0 +1,113 @@ +name: Backup Restore Test + +inputs: + service: + description: Service to test + required: true + kind: + description: Kind of test + required: true + backup-args: + description: Arguments to pass for backup + required: false + default: "" + restore-args: + description: Arguments to pass for restore + required: false + default: "" + test-folder: + description: Folder to use for testing + required: true + base-backup: + description: Base backup to use for testing + required: false + +outputs: + backup-id: + value: ${{ steps.backup.outputs.result }} + +runs: + using: composite + steps: + - name: Backup ${{ inputs.service }} ${{ inputs.kind }} + id: backup + shell: bash + working-directory: src + run: | + set -euo pipefail + ./corso backup create '${{ inputs.service }}' \ + --no-stats --hide-progress --json \ + ${{ inputs.backup-args }} | + tee /dev/stderr | # for printing logs + jq -r '.[0] | .id' | + sed 's/^/result=/' | + tee $GITHUB_OUTPUT + + - name: Restore ${{ inputs.service }} ${{ inputs.kind }} + id: restore + shell: bash + working-directory: src + run: | + set -euo pipefail + ./corso restore '${{ inputs.service }}' \ + --no-stats --hide-progress \ + ${{ inputs.restore-args }} \ + --backup '${{ steps.backup.outputs.result }}' 2>&1 | + tee /tmp/corsologs | + grep -i -e 'Restoring to folder ' | + sed "s/Restoring to folder /result=/" | + tee $GITHUB_OUTPUT + + cat /tmp/corsologs + + - name: Check ${{ inputs.service }} ${{ inputs.kind }} + shell: bash + working-directory: src + env: + SANITY_RESTORE_FOLDER: ${{ steps.restore.outputs.result }} + SANITY_RESTORE_SERVICE: ${{ inputs.service }} + TEST_DATA: ${{ inputs.test-folder }} + BASE_BACKUP: ${{ inputs.base-backup }} + run: | + ./sanity-test + + - name: List ${{ inputs.service }} ${{ inputs.kind }} + shell: bash + working-directory: src + run: | + set -euo pipefail + ./corso backup list ${{ inputs.service }} \ + --no-stats --hide-progress 2>&1 | + tee /tmp/corso-backup-list.log + + if ! grep -q ${{ steps.backup.outputs.result }} /tmp/corso-backup-list.log + then + echo "Unable to find backup from previous run in backup list" + exit 1 + fi + + - name: List item ${{ inputs.service }} ${{ inputs.kind }} + shell: bash + working-directory: src + run: | + set -euo pipefail + ./corso backup list ${{ inputs.service }} \ + --no-stats --hide-progress \ + --backup "${{ steps.backup.outputs.result }}" 2>&1 | + tee /tmp/corso-backup-list-item.log + + if ! grep -q ${{ steps.backup.outputs.result }} /tmp/corso-backup-list-item.log + then + echo "Unable to list previous backup" + exit 1 + fi + + # Upload the original go test output as an artifact for later review. 
+ - name: Upload test log + if: always() + uses: actions/upload-artifact@v3 + with: + name: "${{ inputs.service }}-${{ inputs.kind }}-logs" + path: ${{ env.WORKING_DIR }}/${{ env.CORSO_LOG_DIR }}/ + if-no-files-found: error + retention-days: 14 \ No newline at end of file diff --git a/.github/actions/publish-binary/action.yml b/.github/actions/publish-binary/action.yml new file mode 100644 index 000000000..2a8215592 --- /dev/null +++ b/.github/actions/publish-binary/action.yml @@ -0,0 +1,75 @@ +name: Publish Binary + +inputs: + version: + description: Corso version to use for publishing + required: true + github_token: + description: GitHub token for publishing + required: true + rudderstack_write_key: + description: Write key for RudderStack + required: true + rudderstack_data_plane_url: + description: Data plane URL for RudderStack + required: true + +runs: + using: composite + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 # needed to pull changelog + + - name: Setup Golang with cache + uses: magnetikonline/action-golang-cache@v4 + with: + go-version-file: src/go.mod + + - name: Mark snapshot release + shell: bash + if: ${{ !startsWith(github.ref , 'refs/tags/') }} + run: | + echo "grflags=--snapshot" >> $GITHUB_ENV + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v4 + with: + version: latest + args: release --rm-dist --timeout 500m --parallelism 1 ${{ env.grflags }} + workdir: src + env: + GITHUB_TOKEN: ${{ inputs.github_token }} + RUDDERSTACK_CORSO_WRITE_KEY: ${{ inputs.rudderstack_write_key }} + RUDDERSTACK_CORSO_DATA_PLANE_URL: ${{ inputs.rudderstack_data_plane_url }} + CORSO_VERSION: ${{ inputs.version }} + + - name: Upload darwin arm64 + uses: actions/upload-artifact@v3 + with: + name: corso_Darwin_arm64 + path: src/dist/corso_darwin_arm64/corso + + - name: Upload linux arm64 + uses: actions/upload-artifact@v3 + with: + name: corso_Linux_arm64 + path: src/dist/corso_linux_arm64/corso + + - name: Upload darwin amd64 + uses: actions/upload-artifact@v3 + with: + name: corso_Darwin_amd64 + path: src/dist/corso_darwin_amd64_v1/corso + + - name: Upload linux amd64 + uses: actions/upload-artifact@v3 + with: + name: corso_Linux_amd64 + path: src/dist/corso_linux_amd64_v1/corso + + - name: Upload windows amd64 + uses: actions/upload-artifact@v3 + with: + name: corso_Windows_amd64 + path: src/dist/corso_windows_amd64_v1/corso.exe diff --git a/.github/workflows/_filechange_checker.yml b/.github/workflows/_filechange_checker.yml index 8d02d1437..92201d961 100644 --- a/.github/workflows/_filechange_checker.yml +++ b/.github/workflows/_filechange_checker.yml @@ -9,6 +9,9 @@ on: websitefileschanged: description: "'true' if websites/** or .github/workflows/** files have changed in the branch" value: ${{ jobs.file-change-check.outputs.websitefileschanged }} + actionsfileschanged: + description: "'true' if .github/actions/** or .github/workflows/** files have changed in the branch" + value: ${{ jobs.file-change-check.outputs.actionsfileschanged }} jobs: file-change-check: @@ -19,6 +22,7 @@ jobs: outputs: srcfileschanged: ${{ steps.srcchecker.outputs.srcfileschanged }} websitefileschanged: ${{ steps.websitechecker.outputs.websitefileschanged }} + actionsfileschanged: ${{ steps.actionschecker.outputs.actionsfileschanged }} steps: - uses: actions/checkout@v3 @@ -49,4 +53,11 @@ jobs: if: steps.dornycheck.outputs.src == 'true' || steps.dornycheck.outputs.website == 'true' || steps.dornycheck.outputs.actions == 'true' run: | echo "website or workflow file changes occurred" - 
echo websitefileschanged=true >> $GITHUB_OUTPUT \ No newline at end of file + echo websitefileschanged=true >> $GITHUB_OUTPUT + + - name: Check dorny for changes in actions filepaths + id: actionschecker + if: steps.dornycheck.outputs.actions == 'true' + run: | + echo "actions file changes occurred" + echo actionsfileschanged=true >> $GITHUB_OUTPUT diff --git a/.github/workflows/accSelector.yaml b/.github/workflows/accSelector.yaml new file mode 100644 index 000000000..6030c9002 --- /dev/null +++ b/.github/workflows/accSelector.yaml @@ -0,0 +1,46 @@ +name: SetM365AppAcc + +on: + workflow_call: + outputs: + client_app_slot: + value: ${{ jobs.GetM365App.outputs.client_app_slot }} + client_id_env: + value: ${{ jobs.GetM365App.outputs.client_id_env }} + client_secret_env: + value: ${{ jobs.GetM365App.outputs.client_secret_env }} + +jobs: + GetM365App: + environment: Testing + runs-on: ubuntu-latest + outputs: + client_app_slot: ${{ steps.roundrobin.outputs.CLIENT_APP_SLOT }} + client_id_env: ${{ steps.roundrobin.outputs.CLIENT_ID_ENV }} + client_secret_env: ${{ steps.roundrobin.outputs.CLIENT_SECRET_ENV }} + steps: + - name: Figure out which client id to use + id: roundrobin + run: | + slot=$((GITHUB_RUN_NUMBER % 4)) + echo "CLIENT_APP_SLOT=$slot" >> $GITHUB_OUTPUT + + case $slot in + + 0) + echo "CLIENT_ID_ENV=CLIENT_ID" >> $GITHUB_OUTPUT + echo "CLIENT_SECRET_ENV=CLIENT_SECRET" >> $GITHUB_OUTPUT + ;; + 1) + echo "CLIENT_ID_ENV=CLIENT_ID_2" >> $GITHUB_OUTPUT + echo "CLIENT_SECRET_ENV=CLIENT_SECRET_2" >> $GITHUB_OUTPUT + ;; + 2) + echo "CLIENT_ID_ENV=CLIENT_ID_3" >> $GITHUB_OUTPUT + echo "CLIENT_SECRET_ENV=CLIENT_SECRET_3" >> $GITHUB_OUTPUT + ;; + 3) + echo "CLIENT_ID_ENV=CLIENT_ID_4" >> $GITHUB_OUTPUT + echo "CLIENT_SECRET_ENV=CLIENT_SECRET_4" >> $GITHUB_OUTPUT + ;; + esac diff --git a/.github/workflows/binary-publish.yml b/.github/workflows/binary-publish.yml new file mode 100644 index 000000000..f2ba78438 --- /dev/null +++ b/.github/workflows/binary-publish.yml @@ -0,0 +1,37 @@ +name: Publish binary +on: + workflow_dispatch: + +jobs: + SetEnv: + environment: Testing + runs-on: ubuntu-latest + outputs: + version: ${{ steps.version.outputs.version }} + steps: + - uses: actions/checkout@v3 + + - name: Get version string + id: version + run: | + if ${{ startsWith(github.ref, 'refs/tags/') }}; then + echo "version=$(git describe --exact-match --tags $(git rev-parse HEAD))" | tee -a $GITHUB_OUTPUT + else + echo "version=$(echo unreleased-$(git rev-parse --short HEAD))" | tee -a $GITHUB_OUTPUT + fi + + Publish-Binary: + needs: [SetEnv] + environment: Testing + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Publish Binary + uses: ./.github/actions/publish-binary + with: + version: ${{ needs.SetEnv.outputs.version }} + github_token: ${{ secrets.GITHUB_TOKEN }} + rudderstack_write_key: ${{ secrets.RUDDERSTACK_CORSO_WRITE_KEY }} + rudderstack_data_plane_url: ${{ secrets.RUDDERSTACK_CORSO_DATA_PLANE_URL }} \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a402b946d..e9edc7e33 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,38 +52,7 @@ jobs: # SetM365App will decide which M365 app to use for this CI run SetM365App: - environment: Testing - runs-on: ubuntu-latest - outputs: - client_app_slot: ${{ steps.roundrobin.outputs.CLIENT_APP_SLOT }} - client_id_env: ${{ steps.roundrobin.outputs.CLIENT_ID_ENV }} - client_secret_env: ${{ steps.roundrobin.outputs.CLIENT_SECRET_ENV }} - steps: - - name: Figure out 
which client id to use - id: roundrobin - run: | - slot=$((GITHUB_RUN_NUMBER % 4)) - echo "CLIENT_APP_SLOT=$slot" >> $GITHUB_OUTPUT - - case $slot in - - 0) - echo "CLIENT_ID_ENV=CLIENT_ID" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET" >> $GITHUB_OUTPUT - ;; - 1) - echo "CLIENT_ID_ENV=CLIENT_ID_2" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET_2" >> $GITHUB_OUTPUT - ;; - 2) - echo "CLIENT_ID_ENV=CLIENT_ID_3" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET_3" >> $GITHUB_OUTPUT - ;; - 3) - echo "CLIENT_ID_ENV=CLIENT_ID_4" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET_4" >> $GITHUB_OUTPUT - ;; - esac + uses: alcionai/corso/.github/workflows/accSelector.yaml@main SetEnv: environment: Testing @@ -168,6 +137,8 @@ jobs: AZURE_CLIENT_ID_NAME: ${{ needs.SetM365App.outputs.client_id_env }} AZURE_CLIENT_SECRET_NAME: ${{ needs.SetM365App.outputs.client_secret_env }} CLIENT_APP_SLOT: ${{ needs.SetM365App.outputs.client_app_slot }} + CORSO_LOG_FILE: ./src/testlog/suite-testlogging.log + LOG_GRAPH_REQUESTS: true steps: - uses: actions/checkout@v3 @@ -197,11 +168,9 @@ jobs: AZURE_CLIENT_SECRET: ${{ secrets[env.AZURE_CLIENT_SECRET_NAME] }} AZURE_TENANT_ID: ${{ secrets.TENANT_ID }} CORSO_CI_TESTS: true - CORSO_M365_TEST_USER_ID: ${{ secrets.CORSO_M365_TEST_USER_ID }} - CORSO_SECONDARY_M365_TEST_USER_ID: ${{ secrets.CORSO_SECONDARY_M365_TEST_USER_ID }} + CORSO_M365_TEST_USER_ID: ${{ vars.CORSO_M365_TEST_USER_ID }} + CORSO_SECONDARY_M365_TEST_USER_ID: ${{ vars.CORSO_SECONDARY_M365_TEST_USER_ID }} CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }} - CORSO_LOG_FILE: ./src/testlog/testlogging.log - LOG_GRAPH_REQUESTS: true run: | set -euo pipefail go test \ @@ -211,14 +180,15 @@ jobs: -failfast \ -p 1 \ -timeout 15m \ - ./... 2>&1 | tee ./testlog/gotest.log | gotestfmt -hide successful-tests + ./... \ + 2>&1 | tee ./testlog/gotest.log | gotestfmt -hide successful-tests # Upload the original go test output as an artifact for later review. - name: Upload test log if: failure() uses: actions/upload-artifact@v3 with: - name: test-log + name: ci-test-log path: src/testlog/* if-no-files-found: error retention-days: 14 @@ -231,6 +201,9 @@ jobs: defaults: run: working-directory: src + env: + CORSO_LOG_FILE: ./src/testlog/unit-testlogging.log + LOG_GRAPH_REQUESTS: true steps: - uses: actions/checkout@v3 @@ -252,8 +225,6 @@ jobs: # something elsewhere. CORSO_M365_TEST_USER_ID: 'foo' CORSO_SECONDARY_M365_TEST_USER_ID: 'foo' - CORSO_LOG_FILE: ./src/testlog/testlogging.log - LOG_GRAPH_REQUESTS: true run: | set -euo pipefail go test \ @@ -263,7 +234,8 @@ jobs: -failfast \ -p 1 \ -timeout 15m \ - ./... 2>&1 | tee ./testlog/gotest-unit.log | gotestfmt -hide successful-tests + ./... \ + 2>&1 | tee ./testlog/gotest-unit.log | gotestfmt -hide successful-tests # Upload the original go test output as an artifact for later review. 
- name: Upload test log @@ -283,6 +255,9 @@ jobs: defaults: run: working-directory: src + env: + CORSO_LOG_FILE: ./src/testlog/fork-testlogging.log + LOG_GRAPH_REQUESTS: true steps: - name: Fail check if not repository_dispatch if: github.event_name != 'repository_dispatch' @@ -340,23 +315,23 @@ jobs: AZURE_CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }} AZURE_TENANT_ID: ${{ secrets.TENANT_ID }} CORSO_CI_TESTS: true - CORSO_M365_TEST_USER_ID: ${{ secrets.CORSO_M365_TEST_USER_ID }} + CORSO_M365_TEST_USER_ID: ${{ vars.CORSO_M365_TEST_USER_ID }} CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }} - CORSO_LOG_FILE: ./src/testlog/testlogging.log run: | set -euo pipefail go test \ -json \ -v \ -timeout 15m \ - ./... 2>&1 | tee ./testlog/gotest.log | gotestfmt -hide successful-tests + ./... \ + 2>&1 | tee ./testlog/gotest.log | gotestfmt -hide successful-tests # Upload the original go test log as an artifact for later review. - name: Upload test log if: failure() uses: actions/upload-artifact@v3 with: - name: test-log + name: fork-test-log path: src/testlog/* if-no-files-found: error retention-days: 14 @@ -364,7 +339,7 @@ jobs: # Update check run called "Test-Suite-Fork" - uses: actions/github-script@v6 id: update-check-run - if: ${{ always() }} + if: failure() env: number: ${{ github.event.client_payload.pull_request.number }} job: ${{ github.job }} @@ -395,7 +370,7 @@ jobs: # --- Source Code Linting ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------- - Linting: + Source-Code-Linting: needs: [Precheck, Checkout] environment: Testing runs-on: ubuntu-latest @@ -416,7 +391,7 @@ jobs: with: # Keep pinned to a verson as sometimes updates will add new lint # failures in unchanged code. 
- version: v1.50.1 + version: v1.52.2 working-directory: src skip-pkg-cache: true skip-build-cache: true @@ -435,82 +410,53 @@ jobs: working-directory: src + # ---------------------------------------------------------------------------------------------------- + # --- GitHub Actions Linting ------------------------------------------------------------------------- + # ---------------------------------------------------------------------------------------------------- + + Actions-Lint: + needs: [Precheck] + environment: Testing + runs-on: ubuntu-latest + if: needs.precheck.outputs.actionsfileschanged == 'true' + steps: + - uses: actions/checkout@v3 + + - name: actionlint + uses: raven-actions/actionlint@v1 + with: + fail-on-error: true + cache: true + # Ignore + # * combining commands into a subshell and using single output + # redirect + # * various variable quoting patterns + # * possible ineffective echo commands + flags: "-ignore SC2129 -ignore SC2086 -ignore SC2046 -ignore 2116" + # ---------------------------------------------------------------------------------------------------- # --- Publish steps ---------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------- Publish-Binary: - needs: [Test-Suite-Trusted, Unit-Test-Suite, Linting, Website-Linting, SetEnv] + needs: [Test-Suite-Trusted, Source-Code-Linting, Website-Linting, SetEnv] environment: ${{ needs.SetEnv.outputs.environment }} runs-on: ubuntu-latest if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/main' - defaults: - run: - working-directory: src steps: - uses: actions/checkout@v3 - with: - fetch-depth: 0 # needed to pull changelog - - name: Setup Golang with cache - uses: magnetikonline/action-golang-cache@v4 + - name: Publish Binary + uses: ./.github/actions/publish-binary with: - go-version-file: src/go.mod - - - name: Decide goreleaser release mode - shell: bash - run: | - if test '${{ github.ref }}' = "refs/heads/main"; then - echo "grflags=--snapshot" >> $GITHUB_ENV - else - echo "grflags=" >> $GITHUB_ENV - fi - - - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v4 - with: - version: latest - args: release --rm-dist --timeout 500m --parallelism 1 ${{ env.grflags }} - workdir: src - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - RUDDERSTACK_CORSO_WRITE_KEY: ${{ secrets.RUDDERSTACK_CORSO_WRITE_KEY }} - RUDDERSTACK_CORSO_DATA_PLANE_URL: ${{ secrets.RUDDERSTACK_CORSO_DATA_PLANE_URL }} - CORSO_VERSION: ${{ needs.SetEnv.outputs.version }} - - - name: Upload darwin arm64 - uses: actions/upload-artifact@v3 - with: - name: corso_Darwin_arm64 - path: src/dist/corso_darwin_arm64/corso - - - name: Upload linux arm64 - uses: actions/upload-artifact@v3 - with: - name: corso_Linux_arm64 - path: src/dist/corso_linux_arm64/corso - - - name: Upload darwin amd64 - uses: actions/upload-artifact@v3 - with: - name: corso_Darwin_amd64 - path: src/dist/corso_darwin_amd64_v1/corso - - - name: Upload linux amd64 - uses: actions/upload-artifact@v3 - with: - name: corso_Linux_amd64 - path: src/dist/corso_linux_amd64_v1/corso - - - name: Upload windows amd64 - uses: actions/upload-artifact@v3 - with: - name: corso_Windows_amd64 - path: src/dist/corso_windows_amd64_v1/corso.exe + version: ${{ needs.SetEnv.outputs.version }} + github_token: ${{ secrets.GITHUB_TOKEN }} + rudderstack_write_key: ${{ secrets.RUDDERSTACK_CORSO_WRITE_KEY }} + rudderstack_data_plane_url: ${{ 
secrets.RUDDERSTACK_CORSO_DATA_PLANE_URL }} Publish-Image: - needs: [Test-Suite-Trusted, Unit-Test-Suite, Linting, Website-Linting, SetEnv] + needs: [Test-Suite-Trusted, Source-Code-Linting, Website-Linting, SetEnv] environment: ${{ needs.SetEnv.outputs.environment }} runs-on: ubuntu-latest if: startsWith(github.ref, 'refs/tags/') @@ -652,7 +598,7 @@ jobs: ./corso.exe --version 2>&1 | grep -E "version: ${{ env.CORSO_VERSION }}$" Publish-Website-Test: - needs: [Test-Suite-Trusted, Unit-Test-Suite, Linting, Website-Linting, SetEnv] + needs: [Test-Suite-Trusted, Source-Code-Linting, Website-Linting, SetEnv] environment: ${{ needs.SetEnv.outputs.environment }} runs-on: ubuntu-latest if: github.ref == 'refs/heads/main' diff --git a/.github/workflows/ci_test_cleanup.yml b/.github/workflows/ci_test_cleanup.yml index e3fd4edf9..1c4591e29 100644 --- a/.github/workflows/ci_test_cleanup.yml +++ b/.github/workflows/ci_test_cleanup.yml @@ -1,5 +1,6 @@ name: CI Test Cleanup on: + workflow_dispatch: schedule: # every half hour - cron: "*/30 * * * *" @@ -27,7 +28,7 @@ jobs: - name: Purge CI-Produced Folders for Users uses: ./.github/actions/purge-m365-data with: - user: ${{ secrets[matrix.user] }} + user: ${{ vars[matrix.user] }} folder-prefix: ${{ vars.CORSO_M365_TEST_PREFIXES }} older-than: ${{ env.HALF_HOUR_AGO }} azure-client-id: ${{ secrets.CLIENT_ID }} @@ -58,7 +59,7 @@ jobs: - name: Purge CI-Produced Folders for Sites uses: ./.github/actions/purge-m365-data with: - site: ${{ secrets[matrix.site] }} + site: ${{ vars[matrix.site] }} folder-prefix: ${{ vars.CORSO_M365_TEST_PREFIXES }} libraries: ${{ vars.CORSO_M365_TEST_SITE_LIBRARIES }} older-than: ${{ env.HALF_HOUR_AGO }} diff --git a/.github/workflows/load_test.yml b/.github/workflows/load_test.yml index 449fb9349..9241b3d8f 100644 --- a/.github/workflows/load_test.yml +++ b/.github/workflows/load_test.yml @@ -1,10 +1,8 @@ name: Nightly Load Testing on: schedule: - # every day at 01:59 (01:59am) UTC - # - cron: "59 1 * * *" - # temp, for testing: every 4 hours - - cron: "0 */4 * * *" + # every day at 03:59 GMT (roughly 8pm PST) + - cron: "59 3 * * *" permissions: # required to retrieve AWS credentials @@ -20,6 +18,10 @@ jobs: Load-Tests: environment: Load Testing runs-on: ubuntu-latest + # Skipping load testing for now. They need some love to get up and + # running properly, and it's better to not fight for resources with + # tests that are guaranteed to fail. 
+ if: false defaults: run: working-directory: src @@ -57,7 +59,7 @@ jobs: CORSO_M365_LOAD_TEST_USER_ID: ${{ secrets.CORSO_M365_LOAD_TEST_USER_ID }} CORSO_M365_LOAD_TEST_ORG_USERS: ${{ secrets.CORSO_M365_LOAD_TEST_ORG_USERS }} CORSO_PASSPHRASE: ${{ secrets.CORSO_PASSPHRASE }} - IGNORE_LOAD_TEST_USER_ID: ${{ secrets.EXT_SDK_TEST_USER_ID }} + IGNORE_LOAD_TEST_USER_ID: ${{ vars.EXT_SDK_TEST_USER_ID }} LOG_GRAPH_REQUESTS: true run: | set -euo pipefail diff --git a/.github/workflows/nightly_test.yml b/.github/workflows/nightly_test.yml index a65d196af..ea313f571 100644 --- a/.github/workflows/nightly_test.yml +++ b/.github/workflows/nightly_test.yml @@ -3,12 +3,8 @@ on: workflow_dispatch: schedule: - # Run every day at 0 minutes and 0 hours (midnight GMT) - - cron: "0 0 * * *" - - push: - branches: [main] - tags: ["v*.*.*"] + # Run every day at 04:00 GMT (roughly 8pm PST) + - cron: "0 4 * * *" permissions: # required to retrieve AWS credentials @@ -45,38 +41,7 @@ jobs: # SetM365App will decide which M365 app to use for this CI run SetM365App: - environment: Testing - runs-on: ubuntu-latest - outputs: - client_app_slot: ${{ steps.roundrobin.outputs.CLIENT_APP_SLOT }} - client_id_env: ${{ steps.roundrobin.outputs.CLIENT_ID_ENV }} - client_secret_env: ${{ steps.roundrobin.outputs.CLIENT_SECRET_ENV }} - steps: - - name: Figure out which client id to use - id: roundrobin - run: | - slot=$((GITHUB_RUN_NUMBER % 4)) - echo "CLIENT_APP_SLOT=$slot" >> $GITHUB_OUTPUT - - case $slot in - - 0) - echo "CLIENT_ID_ENV=CLIENT_ID" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET" >> $GITHUB_OUTPUT - ;; - 1) - echo "CLIENT_ID_ENV=CLIENT_ID_2" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET_2" >> $GITHUB_OUTPUT - ;; - 2) - echo "CLIENT_ID_ENV=CLIENT_ID_3" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET_3" >> $GITHUB_OUTPUT - ;; - 3) - echo "CLIENT_ID_ENV=CLIENT_ID_4" >> $GITHUB_OUTPUT - echo "CLIENT_SECRET_ENV=CLIENT_SECRET_4" >> $GITHUB_OUTPUT - ;; - esac + uses: alcionai/corso/.github/workflows/accSelector.yaml@main SetEnv: environment: Testing @@ -85,7 +50,6 @@ jobs: environment: ${{ steps.environment.outputs.environment }} version: ${{ steps.version.outputs.version }} website-bucket: ${{ steps.website-bucket.outputs.website-bucket }} - website-cfid: ${{ steps.website-cfid.outputs.website-cfid }} steps: - uses: actions/checkout@v3 @@ -157,8 +121,9 @@ jobs: AZURE_CLIENT_SECRET: ${{ secrets[env.AZURE_CLIENT_SECRET_NAME] }} AZURE_TENANT_ID: ${{ secrets.TENANT_ID }} CORSO_NIGHTLY_TESTS: true - CORSO_M365_TEST_USER_ID: ${{ secrets.CORSO_M365_TEST_USER_ID }} - CORSO_SECONDARY_M365_TEST_USER_ID: ${{ secrets.CORSO_SECONDARY_M365_TEST_USER_ID }} + CORSO_E2E_TESTS: true + CORSO_M365_TEST_USER_ID: ${{ vars.CORSO_M365_TEST_USER_ID }} + CORSO_SECONDARY_M365_TEST_USER_ID: ${{ vars.CORSO_SECONDARY_M365_TEST_USER_ID }} CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }} CORSO_LOG_FILE: ./src/testlog/testlogging.log LOG_GRAPH_REQUESTS: true @@ -175,10 +140,10 @@ jobs: # Upload the original go test output as an artifact for later review. 
- name: Upload test log - if: failure() + if: always() uses: actions/upload-artifact@v3 with: - name: test-log + name: nightly-test-log path: src/testlog/* if-no-files-found: error retention-days: 14 diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml index 93f4b7211..c793986e7 100644 --- a/.github/workflows/sanity-test.yaml +++ b/.github/workflows/sanity-test.yaml @@ -1,5 +1,5 @@ name: Sanity Testing -on: +on: push: branches: - main @@ -19,25 +19,40 @@ concurrency: cancel-in-progress: true jobs: + SetM365App: + uses: alcionai/corso/.github/workflows/accSelector.yaml@main + Sanity-Tests: + needs: [ SetM365App ] environment: Testing runs-on: ubuntu-latest env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY_SECRET }} - AZURE_CLIENT_ID: ${{ secrets.CLIENT_ID }} - AZURE_CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }} + AZURE_CLIENT_ID: ${{ secrets[needs.SetM365App.outputs.client_id_env] }} + AZURE_CLIENT_SECRET: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }} AZURE_TENANT_ID: ${{ secrets.TENANT_ID }} CORSO_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }} - CORSO_LOG_FILE: ./src/testlog/testlogging.log - CORSO_M365_TEST_USER_ID: ${{ github.event.inputs.user != '' && github.event.inputs.user || secrets.CORSO_M365_TEST_USER_ID }} + CORSO_LOG_DIR: testlog + CORSO_LOG_FILE: testlog/testlogging.log CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }} - TEST_RESULT: "test_results" + RESTORE_DEST_PFX: Corso_Test_Sanity_ + TEST_RESULT: test_results + TEST_USER: ${{ github.event.inputs.user != '' && github.event.inputs.user || secrets.CORSO_M365_TEST_USER_ID }} + TEST_SITE: ${{ secrets.CORSO_M365_TEST_SITE_URL }} + SECONDARY_TEST_USER : ${{ secrets.CORSO_SECONDARY_M365_TEST_USER_ID }} + # The default working directory doesn't seem to apply to things without + # the 'run' directive. https://stackoverflow.com/a/67845456 + WORKING_DIR: src defaults: run: - working-directory: src + working-directory: ${{ env.WORKING_DIR }} steps: +########################################################################################################################################## + +# setup + - uses: actions/checkout@v3 - name: Setup Golang with cache @@ -45,336 +60,281 @@ jobs: with: go-version-file: src/go.mod - - run: make build + - run: go build -o corso + - run: go build -o sanity-test ./cmd/sanity_test - - run: go build -o sanityCheck ./cmd/sanity_test + - run: mkdir ${TEST_RESULT} + - run: mkdir ${CORSO_LOG_DIR} - - run: mkdir test_results +########################################################################################################################################## - - run: mkdir testlog +# Pre-Run cleanup + + # unlike CI tests, sanity tests are not expected to run concurrently. + # however, the sanity yaml concurrency is set to a maximum of 1 run, preferring + # the latest release. If we wait to clean up the production til after the tests + # It would be possible to complete all the testing but cancel the run before + # cleanup occurs. Setting the cleanup before the tests ensures we always begin + # with a clean slate, and cannot compound data production. 
+ - name: Set purge boundary + if: always() + run: | + echo "NOW=$(date +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV + + - name: Purge CI-Produced Folders for Users + uses: ./.github/actions/purge-m365-data + with: + user: ${{ env.TEST_USER }} + folder-prefix: ${{ env.RESTORE_DEST_PFX }} + older-than: ${{ env.NOW }} + azure-client-id: ${{ env.AZURE_CLIENT_ID }} + azure-client-secret: ${{ env.AZURE_CLIENT_SECRET }} + azure-tenant-id: ${{ env.AZURE_TENANT_ID }} + m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }} + m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }} + + - name: Purge CI-Produced Folders for Sites + if: always() + uses: ./.github/actions/purge-m365-data + with: + site: ${{ env.TEST_SITE }} + folder-prefix: ${{ env.RESTORE_DEST_PFX }} + libraries: ${{ vars.CORSO_M365_TEST_SITE_LIBRARIES }} + older-than: ${{ env.NOW }} + azure-client-id: ${{ env.AZURE_CLIENT_ID }} + azure-client-secret: ${{ env.AZURE_CLIENT_SECRET }} + azure-tenant-id: ${{ env.AZURE_TENANT_ID }} + m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }} + m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }} + +########################################################################################################################################## + +# Repository commands - # run the tests - name: Version Test - run: | - set -euo pipefail - if [ $( ./corso --version | grep 'Corso version:' | wc -l) -ne 1 ] - then - echo "valid version not found" - exit 1 - fi + run: | + ./corso --version | grep -c 'Corso version:' - name: Repo init test id: repo-init env: TEST_RESULT: "test_results" run: | - set -euo pipefail - prefix=`date +"%Y-%m-%d-%T"` - + set -euo pipefail + prefix=$(date +"%Y-%m-%d-%T") + echo -e "\nRepo init test\n" >> ${CORSO_LOG_FILE} ./corso repo init s3 \ - --no-stats \ - --hide-progress \ - --prefix $prefix \ - --bucket ${CORSO_BUCKET} 2>&1 | tee $TEST_RESULT/initrepo.txt + --no-stats --hide-progress --prefix $prefix \ + --bucket ${CORSO_BUCKET} 2>&1 | tee $TEST_RESULT/initrepo.txt - if ! grep -q 'Initialized a S3 repository within bucket' $TEST_RESULT/initrepo.txt + if ! grep -q 'Initialized a S3 repository within bucket' $TEST_RESULT/initrepo.txt then - echo "repo could not be initiated" + echo "Repo could not be initialized" exit 1 fi - - echo result="$prefix" >> $GITHUB_OUTPUT - # run the tests + echo result="$prefix" >> $GITHUB_OUTPUT + - name: Repo connect test run: | set -euo pipefail + echo -e "\nRepo connect test\n" >> ${CORSO_LOG_FILE} ./corso repo connect s3 \ - --no-stats \ - --hide-progress \ - --prefix ${{ steps.repo-init.outputs.result }} \ - --bucket ${CORSO_BUCKET} 2>&1 | tee $TEST_RESULT/connect.txt + --no-stats --hide-progress --prefix ${{ steps.repo-init.outputs.result }} \ + --bucket ${CORSO_BUCKET} 2>&1 | tee $TEST_RESULT/connect.txt - if ! grep -q 'Connected to S3 bucket' $TEST_RESULT/connect.txt + if ! grep -q 'Connected to S3 bucket' $TEST_RESULT/connect.txt then - echo "repo could not be connected" + echo "Repo could not be connected" exit 1 fi +########################################################################################################################################## + +# Exchange + # generate new entries to roll into the next load test # only runs if the test was successful - - name: New Data Creation + - name: Exchange - Create new data working-directory: ./src/cmd/factory - env: - AZURE_CLIENT_ID: ${{ secrets.CLIENT_ID }} - AZURE_CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }} - AZURE_TENANT_ID: ${{ secrets.TENANT_ID }} run: | go run . 
exchange emails \ - --user ${{ env.CORSO_M365_TEST_USER_ID }} \ - --tenant ${{ env.AZURE_TENANT_ID }} \ - --destination Corso_Restore_st_${{ steps.repo-init.outputs.result }} \ + --user ${TEST_USER} \ + --tenant ${AZURE_TENANT_ID} \ + --destination ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }} \ --count 4 - # run the tests - - name: Backup exchange test - id: exchange-test + - name: Exchange - Backup + id: exchange-backup + uses: ./.github/actions/backup-restore-test + with: + service: exchange + kind: backup + backup-args: '--mailbox "${TEST_USER}" --data "email"' + restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + + - name: Exchange - Incremental backup + id: exchange-backup-incremental + uses: ./.github/actions/backup-restore-test + with: + service: exchange + kind: backup-incremental + backup-args: '--mailbox "${TEST_USER}" --data "email"' + restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + base-backup: ${{ steps.exchange-backup.outputs.backup-id }} + + - name: Exchange - Non delta backup + id: exchange-backup-non-delta + uses: ./.github/actions/backup-restore-test + with: + service: exchange + kind: backup-non-delta + backup-args: '--mailbox "${TEST_USER}" --data "email" --disable-delta' + restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + base-backup: ${{ steps.exchange-backup.outputs.backup-id }} + + - name: Exchange - Incremental backup after non-delta + id: exchange-backup-incremental-after-non-delta + uses: ./.github/actions/backup-restore-test + with: + service: exchange + kind: backup-incremental-after-non-delta + backup-args: '--mailbox "${TEST_USER}" --data "email"' + restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}' + base-backup: ${{ steps.exchange-backup.outputs.backup-id }} + + +########################################################################################################################################## + +# Onedrive + + # generate new entries for test + - name: OneDrive - Create new data + id: new-data-creation-onedrive + working-directory: ./src/cmd/factory run: | - ./corso backup create exchange \ - --no-stats \ - --mailbox "${CORSO_M365_TEST_USER_ID}" \ - --hide-progress \ - --data 'email' \ - --json \ - 2>&1 | tee $TEST_RESULT/backup_exchange.txt + suffix=$(date +"%Y-%m-%d_%H-%M-%S") - resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_exchange.txt ) + go run . 
onedrive files \ + --user ${TEST_USER} \ + --secondaryuser ${SECONDARY_TEST_USER} \ + --tenant ${AZURE_TENANT_ID} \ + --destination ${RESTORE_DEST_PFX}$suffix \ + --count 4 - if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then - echo "backup was not successful" - exit 1 - fi + echo result="${suffix}" >> $GITHUB_OUTPUT - data=$( echo $resultjson | jq -r '.[0] | .id' ) - echo result=$data >> $GITHUB_OUTPUT + - name: OneDrive - Backup + id: onedrive-backup + uses: ./.github/actions/backup-restore-test + with: + service: onedrive + kind: backup + backup-args: '--user "${TEST_USER}"' + restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions' + test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }}' - # list all exchange backups - - name: Backup exchange list test + # generate some more enteries for incremental check + - name: OneDrive - Create new data (for incremental) + working-directory: ./src/cmd/factory run: | - set -euo pipefail - ./corso backup list exchange \ - --no-stats \ - --hide-progress \ - 2>&1 | tee $TEST_RESULT/backup_exchange_list.txt + go run . onedrive files \ + --user ${TEST_USER} \ + --secondaryuser ${SECONDARY_TEST_USER} \ + --tenant ${AZURE_TENANT_ID} \ + --destination ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} \ + --count 4 - if ! grep -q ${{ steps.exchange-test.outputs.result }} $TEST_RESULT/backup_exchange_list.txt - then - echo "listing of backup was not successful" - exit 1 - fi + - name: OneDrive - Incremental backup + id: onedrive-incremental + uses: ./.github/actions/backup-restore-test + with: + service: onedrive + kind: incremental + backup-args: '--user "${TEST_USER}"' + restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions' + test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }}' - # list the previous exchange backups - - name: Backup exchange list single backup test +########################################################################################################################################## + +# Sharepoint + + # generate new entries for test + - name: SharePoint - Create new data + id: new-data-creation-sharepoint + working-directory: ./src/cmd/factory run: | - set -euo pipefail - ./corso backup list exchange \ - --no-stats \ - --hide-progress \ - --backup "${{ steps.exchange-test.outputs.result }}" \ - 2>&1 | tee $TEST_RESULT/backup_exchange_list_single.txt + suffix=$(date +"%Y-%m-%d_%H-%M-%S") - if ! grep -q ${{ steps.exchange-test.outputs.result }} $TEST_RESULT/backup_exchange_list.txt - then - echo "listing of backup was not successful" - exit 1 - fi + go run . 
sharepoint files \ + --site ${TEST_SITE} \ + --user ${TEST_USER} \ + --secondaryuser ${SECONDARY_TEST_USER} \ + --tenant ${AZURE_TENANT_ID} \ + --destination ${RESTORE_DEST_PFX}$suffix \ + --count 4 - # test exchange restore - - name: Backup exchange restore - id: exchange-restore-test + echo result="${suffix}" >> $GITHUB_OUTPUT + + - name: SharePoint - Backup + id: sharepoint-backup + uses: ./.github/actions/backup-restore-test + with: + service: sharepoint + kind: backup + backup-args: '--site "${TEST_SITE}"' + restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions' + test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }}' + + # generate some more enteries for incremental check + - name: SharePoint - Create new data (for incremental) + working-directory: ./src/cmd/factory run: | - set -euo pipefail - ./corso restore exchange \ - --no-stats \ - --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \ - --hide-progress \ - --backup "${{ steps.exchange-test.outputs.result }}" \ - 2>&1 | tee $TEST_RESULT/exchange-restore-test.txt - echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/exchange-restore-test.txt | sed "s/Restoring to folder//" ) >> $GITHUB_OUTPUT - - - name: Restoration check - env: - SANITY_RESTORE_FOLDER: ${{ steps.exchange-restore-test.outputs.result }} - SANITY_RESTORE_SERVICE: "exchange" - TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }} - run: | - set -euo pipefail - ./sanityCheck + go run . sharepoint files \ + --site ${TEST_SITE} \ + --user ${TEST_USER} \ + --secondaryuser ${SECONDARY_TEST_USER} \ + --tenant ${AZURE_TENANT_ID} \ + --destination ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} \ + --count 4 - # test incremental backup exchange - - name: Backup exchange incremental - id: exchange-incremental-test - run: | - set -euo pipefail - ./corso backup create exchange \ - --no-stats \ - --hide-progress \ - --mailbox "${CORSO_M365_TEST_USER_ID}" \ - --json \ - 2>&1 | tee $TEST_RESULT/backup_exchange_incremental.txt + - name: SharePoint - Incremental backup + id: sharepoint-incremental + uses: ./.github/actions/backup-restore-test + with: + service: sharepoint + kind: incremental + backup-args: '--site "${TEST_SITE}"' + restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions' + test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }}' - resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_exchange_incremental.txt ) +########################################################################################################################################## - if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then - echo "backup was not successful" - exit 1 - fi - - echo result=$( echo $resultjson | jq -r '.[0] | .id' ) >> $GITHUB_OUTPUT - - # test exchange restore - - name: Backup incremantal exchange restore - id: exchange-incremantal-restore-test - run: | - set -euo pipefail - ./corso restore exchange \ - --no-stats \ - --hide-progress \ - --backup "${{ steps.exchange-incremental-test.outputs.result }}" \ - --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \ - 2>&1 | tee $TEST_RESULT/exchange-incremantal-restore-test.txt - echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/exchange-incremantal-restore-test.txt | sed "s/Restoring to folder//" ) >> 
$GITHUB_OUTPUT - - - name: Restoration check - env: - SANITY_RESTORE_FOLDER: ${{ steps.exchange-incremantal-restore-test.outputs.result }} - SANITY_RESTORE_SERVICE: "exchange" - TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }} - BASE_BACKUP: ${{ steps.exchange-restore-test.outputs.result }} - run: | - set -euo pipefail - ./sanityCheck - - - # Onedrive test - - # run the tests - - name: Backup onedrive test - id: onedrive-test - run: | - set -euo pipefail - ./corso backup create onedrive \ - --no-stats \ - --hide-progress \ - --user "${CORSO_M365_TEST_USER_ID}" \ - --json \ - 2>&1 | tee $TEST_RESULT/backup_onedrive.txt - - resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_onedrive.txt ) - - if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then - echo "backup was not successful" - exit 1 - fi - - data=$( echo $resultjson | jq -r '.[0] | .id' ) - echo result=$data >> $GITHUB_OUTPUT - - # list all onedrive backups - - name: Backup onedrive list test - run: | - set -euo pipefail - ./corso backup list onedrive \ - --no-stats \ - --hide-progress \ - 2>&1 | tee $TEST_RESULT/backup_onedrive_list.txt - - if ! grep -q ${{ steps.onedrive-test.outputs.result }} $TEST_RESULT/backup_onedrive_list.txt - then - echo "listing of backup was not successful" - exit 1 - fi - - # list the previous onedrive backup - - name: Backup onedrive list test - run: | - set -euo pipefail - ./corso backup list onedrive \ - --no-stats \ - --hide-progress \ - --backup "${{ steps.onedrive-test.outputs.result }}" \ - 2>&1 | tee $TEST_RESULT/backup_onedrive_list_single.txt - - if ! grep -q ${{ steps.onedrive-test.outputs.result }} $TEST_RESULT/backup_onedrive_list.txt - then - echo "listing of backup was not successful" - exit 1 - fi - - # test onedrive restore - - name: Backup onedrive restore - id: onedrive-restore-test - run: | - set -euo pipefail - ./corso restore onedrive \ - --no-stats \ - --restore-permissions \ - --hide-progress \ - --backup "${{ steps.onedrive-test.outputs.result }}" \ - 2>&1 | tee $TEST_RESULT/onedrive-restore-test.txt - echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/onedrive-restore-test.txt | sed "s/Restoring to folder//") >> $GITHUB_OUTPUT - - - name: Restoration oneDrive check - env: - SANITY_RESTORE_FOLDER: ${{ steps.onedrive-restore-test.outputs.result }} - SANITY_RESTORE_SERVICE: "onedrive" - run: | - set -euo pipefail - ./sanityCheck - - # test onedrive incremental - - name: Backup onedrive incremental - id: onedrive-incremental-test - run: | - set -euo pipefail - ./corso backup create onedrive \ - --no-stats \ - --hide-progress \ - --user "${CORSO_M365_TEST_USER_ID}" \ - --json \ - 2>&1 | tee $TEST_RESULT/backup_onedrive_incremental.txt - - resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_onedrive_incremental.txt ) - - if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then - echo "backup was not successful" - exit 1 - fi - - data=$( echo $resultjson | jq -r '.[0] | .id' ) - echo result=$data >> $GITHUB_OUTPUT - - # test onedrive restore - - name: Backup onedrive restore - id: onedrive-incremental-restore-test - run: | - set -euo pipefail - ./corso restore onedrive \ - --no-stats \ - --restore-permissions \ - --hide-progress \ - --backup "${{ steps.onedrive-incremental-test.outputs.result }}" \ - 2>&1 | tee $TEST_RESULT/onedrive-incremental-restore-test.txt - echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/onedrive-incremental-restore-test.txt | sed "s/Restoring to folder//") >> 
$GITHUB_OUTPUT - - - name: Restoration oneDrive check - env: - SANITY_RESTORE_FOLDER: ${{ steps.onedrive-incremental-restore-test.outputs.result }} - SANITY_RESTORE_SERVICE: "onedrive" - run: | - set -euo pipefail - ./sanityCheck +# Logging & Notifications # Upload the original go test output as an artifact for later review. - name: Upload test log - if: failure() + if: always() uses: actions/upload-artifact@v3 with: - name: test-log - path: src/testlog/* + name: sanity-test-log + path: ${{ env.WORKING_DIR }}/${{ env.CORSO_LOG_DIR }}/ if-no-files-found: error retention-days: 14 - # run the tests - name: SHA info id: sha-info if: failure() - run: | - echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT - echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT - echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT - - + run: | + echo ${GITHUB_REF#refs/heads/}-${GITHUB_SHA} + echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT + echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT + echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT + - name: Send Github Action failure to Slack id: slack-notification if: failure() @@ -384,21 +344,11 @@ jobs: { "text": "GitHub Action build result: ${{ job.status }} on SHA: ${{ steps.sha-info.outputs.SHA }}", "blocks": [ - { - "type": "header", - "text": { - "type": "plain_text", - "text": "Failure in Sanity Test" - } - }, - { - "type": "divider" - }, { "type": "section", "text": { "type": "mrkdwn", - "text": "<${{ steps.sha-info.outputs.RUN_URL }}|Check logs> for <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>" + "text": "[FAILED] Sanity Checks :: <${{ steps.sha-info.outputs.RUN_URL }}|[Logs]> <${{ github.event.pull_request.html_url || github.event.head_commit.url }}|[Base]>\nCommit: <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>" } } ] diff --git a/.github/workflows/website-publish.yml b/.github/workflows/website-publish.yml index b53ed320d..dda3909e9 100644 --- a/.github/workflows/website-publish.yml +++ b/.github/workflows/website-publish.yml @@ -28,8 +28,7 @@ jobs: - name: Get version string id: version run: | - echo "set-output name=version::$(git describe --tags --abbrev=0)" - echo "::set-output name=version::$(git describe --tags --abbrev=0)" + echo version=$(git describe --tags --abbrev=0) | tee -a $GITHUB_OUTPUT # ---------------------------------------------------------------------------------------------------- # --- Website Linting ----------------------------------------------------------------------------------- diff --git a/CHANGELOG.md b/CHANGELOG.md index 14637004d..475c98407 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,9 +7,43 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] (beta) +### Added +### Fixed +### Known Issues + +## [v0.8.0] (beta) - 2023-05-15 + +### Added +- Released the --mask-sensitive-data flag, which will automatically obscure private data in logs. +- Added `--disable-delta` flag to disable delta based backups for Exchange +- Permission support for SharePoint libraries. + +### Fixed +- Graph requests now automatically retry in case of a Bad Gateway or Gateway Timeout. 
+- POST retries following certain status codes (500, 502, 504) will re-use the original request body instead of retrying with an empty body. +- Fix nil pointer exception when running an incremental backup on SharePoint where the base backup used an older index data format. +- --user and --mailbox flags have been removed from CLI examples for details and restore commands (they were already not supported; this only updates the docs). +- Improve restore time on large restores by optimizing how items are loaded from the remote repository. +- Remove Exchange item filtering based on M365 item ID via the CLI. +- OneDrive backups no longer include a user's non-default drives. +- OneDrive and SharePoint file downloads will properly redirect from 3xx responses. +- Refined OneDrive rate limiter controls to reduce throttling errors. +- Fix handling of duplicate folders at the same hierarchy level in Exchange. Duplicate folders will be merged during restore operations. +- Fix backup for mailboxes that have used up all their storage quota. +- Restored folders no longer appear in the restore results. Only restored items will be displayed. + +### Known Issues +- Restore operations will merge duplicate Exchange folders at the same hierarchy level into a single folder. +- SharePoint SiteGroup permissions are not restored. +- SharePoint document library data can't be restored after the library has been deleted. + +## [v0.7.0] (beta) - 2023-05-02 + ### Added - Permissions backup for OneDrive is now out of experimental (By default, only newly backed up items will have their permissions backed up. You will have to run a full backup to ensure all items have their permissions backed up.) - LocationRef is now populated for all services and data types. It should be used in place of RepoRef if a location for an item is required. +- User selection for Exchange and OneDrive can accept either a user PrincipalName or the user's canonical ID. +- Add path information to items that were skipped during backup because they were flagged as malware. ### Fixed - Fixed permissions restore in latest backup version. @@ -24,9 +58,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - ParentPath of json output for Exchange calendar now shows names instead of IDs. - Fixed failure when downloading huge amount of attachments - Graph API requests that return an ECONNRESET error are now retried. +- Fixed edge case in incremental backups where moving a subfolder, deleting and recreating the subfolder's original parent folder, and moving the subfolder back to where it started would skip backing up unchanged items in the subfolder. +- SharePoint now correctly displays site URLs on `backup list` instead of the site ID. +- Drives with a directory containing a folder named 'folder' will now restore without error. +- The CORSO_LOG_FILE env var is used if no --log-file flag is provided. +- Fixed Exchange events progress output to show calendar names instead of IDs. +- Fixed reporting of 'no items match' when restoring or listing details on an older Exchange backup and filtering by folder. +- Fix backup for mailboxes that have used up all their storage quota. ### Known Issues - Restoring a OneDrive or SharePoint file with the same name as a file with that name as its M365 ID may restore both items. +- Exchange event restores will display calendar IDs instead of names in the progress output. 
## [v0.6.1] (beta) - 2023-03-21 @@ -237,7 +279,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Miscellaneous - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35)) -[Unreleased]: https://github.com/alcionai/corso/compare/v0.6.1...HEAD +[Unreleased]: https://github.com/alcionai/corso/compare/v0.7.0...HEAD +[v0.7.0]: https://github.com/alcionai/corso/compare/v0.6.1...v0.7.0 [v0.6.1]: https://github.com/alcionai/corso/compare/v0.5.0...v0.6.1 [v0.5.0]: https://github.com/alcionai/corso/compare/v0.4.0...v0.5.0 [v0.4.0]: https://github.com/alcionai/corso/compare/v0.3.0...v0.4.0 diff --git a/build/Dockerfile b/build/Dockerfile index fc0f17419..0c3b41247 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -6,7 +6,7 @@ COPY src . ARG CORSO_BUILD_LDFLAGS="" RUN go build -o corso -ldflags "$CORSO_BUILD_LDFLAGS" -FROM alpine:3.17 +FROM alpine:3 LABEL org.opencontainers.image.title="Corso" LABEL org.opencontainers.image.description="Free, Secure, and Open-Source Backup for Microsoft 365" diff --git a/docker/Dockerfile b/docker/Dockerfile index 48238f2ce..cae074692 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -20,7 +20,7 @@ ARG TARGETARCH RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /corso . ## Deploy -FROM ubuntu:latest +FROM ubuntu:22.10 COPY --from=build /corso / diff --git a/src/Makefile b/src/Makefile index fff36d78c..74119b3d9 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1,5 +1,5 @@ # This must match the version defined in .github/workflows/lint.yaml. -WANTED_LINT_VERSION := 1.50.1 +WANTED_LINT_VERSION := 1.52.2 LINT_VERSION := $(shell golangci-lint version | cut -d' ' -f4) HAS_LINT := $(shell which golangci-lint) @@ -82,4 +82,4 @@ load-test: ./pkg/repository/loadtest/repository_load_test.go getM365: - go build -o getM365 cmd/getM365/main.go \ No newline at end of file + go build -o getM365 cmd/getM365/main.go diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index c3233e231..0a8b45dc3 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -9,13 +9,10 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/alcionai/corso/src/cli/config" - "github.com/alcionai/corso/src/cli/options" . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/data" - "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" @@ -198,7 +195,7 @@ func runBackups( r repository.Repository, serviceName, resourceOwnerType string, selectorSet []selectors.Selector, - ins common.IDNameSwapper, + ins idname.Cacher, ) error { var ( bIDs []string @@ -210,7 +207,7 @@ func runBackups( var ( owner = discSel.DiscreteOwner - ictx = clues.Add(ctx, "resource_owner", owner) + ictx = clues.Add(ctx, "resource_owner_selected", owner) ) bo, err := r.NewBackupWithLookup(ictx, discSel, ins) @@ -221,6 +218,11 @@ func runBackups( continue } + ictx = clues.Add( + ctx, + "resource_owner_id", bo.ResourceOwner.ID(), + "resource_owner_name", bo.ResourceOwner.Name()) + err = bo.Run(ictx) if err != nil { errs = append(errs, clues.Wrap(err, owner).WithClues(ictx)) @@ -230,7 +232,13 @@ func runBackups( } bIDs = append(bIDs, string(bo.Results.BackupID)) - Infof(ctx, "Done - ID: %v\n", bo.Results.BackupID) + + if !DisplayJSONFormat() { + Infof(ctx, "Done\n") + printBackupStats(ctx, r, string(bo.Results.BackupID)) + } else { + Infof(ctx, "Done - ID: %v\n", bo.Results.BackupID) + } } bups, berrs := r.Backups(ctx, bIDs) @@ -264,7 +272,7 @@ func genericDeleteCommand(cmd *cobra.Command, bID, designation string, args []st ctx := clues.Add(cmd.Context(), "delete_backup_id", bID) - r, _, err := getAccountAndConnect(ctx) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } @@ -285,7 +293,7 @@ func genericDeleteCommand(cmd *cobra.Command, bID, designation string, args []st func genericListCommand(cmd *cobra.Command, bID string, service path.ServiceType, args []string) error { ctx := cmd.Context() - r, _, err := getAccountAndConnect(ctx) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } @@ -318,20 +326,16 @@ func genericListCommand(cmd *cobra.Command, bID string, service path.ServiceType return nil } -func getAccountAndConnect(ctx context.Context) (repository.Repository, *account.Account, error) { - cfg, err := config.GetConfigRepoDetails(ctx, true, nil) - if err != nil { - return nil, nil, err - } - - r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, options.Control()) - if err != nil { - return nil, nil, clues.Wrap(err, "Failed to connect to the "+cfg.Storage.Provider.String()+" repository") - } - - return r, &cfg.Account, nil -} - func ifShow(flag string) bool { return strings.ToLower(strings.TrimSpace(flag)) == "show" } + +func printBackupStats(ctx context.Context, r repository.Repository, bid string) { + b, err := r.Backup(ctx, bid) + if err != nil { + logger.CtxErr(ctx, err).Error("finding backup immediately after backup operation completion") + } + + b.ToPrintable().Stats.Print(ctx) + Info(ctx, " ") +} diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index b0eac0bdf..ded194a05 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -17,7 +17,6 @@ import ( "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" - "github.com/alcionai/corso/src/pkg/services/m365" ) // 
------------------------------------------------------------------------------------------------ @@ -50,20 +49,20 @@ corso backup create exchange --mailbox '*'` exchangeServiceCommandDeleteExamples = `# Delete Exchange backup with ID 1234abcd-12ab-cd34-56de-1234abcd corso backup delete exchange --backup 1234abcd-12ab-cd34-56de-1234abcd` - exchangeServiceCommandDetailsExamples = `# Explore Alice's items in backup 1234abcd-12ab-cd34-56de-1234abcd -corso backup details exchange --backup 1234abcd-12ab-cd34-56de-1234abcd --mailbox alice@example.com + exchangeServiceCommandDetailsExamples = `# Explore items in Alice's latest backup (1234abcd...) +corso backup details exchange --backup 1234abcd-12ab-cd34-56de-1234abcd -# Explore Alice's emails with subject containing "Hello world" in folder "Inbox" from a specific backup +# Explore emails in the folder "Inbox" with subject containing "Hello world" corso backup details exchange --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --mailbox alice@example.com --email-subject "Hello world" --email-folder Inbox + --email-subject "Hello world" --email-folder Inbox -# Explore Bobs's events occurring after start of 2022 from a specific backup +# Explore calendar events occurring after start of 2022 corso backup details exchange --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --mailbox bob@example.com --event-starts-after 2022-01-01T00:00:00 + --event-starts-after 2022-01-01T00:00:00 -# Explore Alice's contacts with name containing Andy from a specific backup +# Explore contacts named Andy corso backup details exchange --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --mailbox alice@example.com --contact-name Andy` + --contact-name Andy` ) // called by backup.go to map subcommands to provider-specific handling. @@ -88,7 +87,9 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command { options.AddFetchParallelismFlag(c) options.AddFailFastFlag(c) options.AddDisableIncrementalsFlag(c) + options.AddDisableDeltaFlag(c) options.AddEnableImmutableIDFlag(c) + options.AddDisableConcurrencyLimiterFlag(c) case listCommand: c, fs = utils.AddCommand(cmd, exchangeListCmd()) @@ -152,7 +153,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { return err } - r, acct, err := getAccountAndConnect(ctx) + r, acct, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } @@ -161,10 +162,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { sel := exchangeBackupCreateSelectors(utils.UserFV, utils.CategoryDataFV) - // TODO: log/print recoverable errors - errs := fault.New(false) - - ins, err := m365.UsersMap(ctx, *acct, errs) + ins, err := utils.UsersMap(ctx, *acct, fault.New(true)) if err != nil { return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users")) } @@ -264,7 +262,7 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error { ctx := cmd.Context() opts := utils.MakeExchangeOpts(cmd) - r, _, err := getAccountAndConnect(ctx) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } diff --git a/src/cli/backup/exchange_e2e_test.go b/src/cli/backup/exchange_e2e_test.go index ef37d00fb..e5c60df2b 100644 --- a/src/cli/backup/exchange_e2e_test.go +++ b/src/cli/backup/exchange_e2e_test.go @@ -18,7 +18,7 @@ import ( "github.com/alcionai/corso/src/cli/config" "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" 
"github.com/alcionai/corso/src/internal/connector/exchange" "github.com/alcionai/corso/src/internal/operations" "github.com/alcionai/corso/src/internal/tester" @@ -54,7 +54,6 @@ func TestNoBackupExchangeE2ESuite(t *testing.T) { suite.Run(t, &NoBackupExchangeE2ESuite{Suite: tester.NewE2ESuite( t, [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, )}) } @@ -120,7 +119,6 @@ func TestBackupExchangeE2ESuite(t *testing.T) { suite.Run(t, &BackupExchangeE2ESuite{Suite: tester.NewE2ESuite( t, [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, )}) } @@ -235,7 +233,6 @@ func TestPreparedBackupExchangeE2ESuite(t *testing.T) { suite.Run(t, &PreparedBackupExchangeE2ESuite{Suite: tester.NewE2ESuite( t, [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, )}) } @@ -256,13 +253,8 @@ func (suite *PreparedBackupExchangeE2ESuite) SetupSuite() { suite.backupOps = make(map[path.CategoryType]string) var ( - users = []string{suite.m365UserID} - idToName = map[string]string{suite.m365UserID: suite.m365UserID} - nameToID = map[string]string{suite.m365UserID: suite.m365UserID} - ins = common.IDsNames{ - IDToName: idToName, - NameToID: nameToID, - } + users = []string{suite.m365UserID} + ins = idname.NewCache(map[string]string{suite.m365UserID: suite.m365UserID}) ) for _, set := range []path.CategoryType{email, contacts, events} { @@ -495,7 +487,6 @@ func TestBackupDeleteExchangeE2ESuite(t *testing.T) { Suite: tester.NewE2ESuite( t, [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, ), }) } diff --git a/src/cli/backup/exchange_test.go b/src/cli/backup/exchange_test.go index f4b864cd6..d8d4f9e68 100644 --- a/src/cli/backup/exchange_test.go +++ b/src/cli/backup/exchange_test.go @@ -43,6 +43,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { utils.UserFN, utils.CategoryDataFN, options.DisableIncrementalsFN, + options.DisableDeltaFN, options.FailFastFN, options.FetchParallelismFN, options.SkipReduceFN, diff --git a/src/cli/backup/help_e2e_test.go b/src/cli/backup/help_e2e_test.go index 9fec46934..1a5356652 100644 --- a/src/cli/backup/help_e2e_test.go +++ b/src/cli/backup/help_e2e_test.go @@ -46,7 +46,7 @@ func prepM365Test( vpr, cfgFP := tester.MakeTempTestConfigClone(t, force) ctx = config.SetViper(ctx, vpr) - repo, err := repository.Initialize(ctx, acct, st, control.Options{}) + repo, err := repository.Initialize(ctx, acct, st, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) return acct, st, repo, vpr, recorder, cfgFP diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 006ae087b..dca460de0 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -17,7 +17,6 @@ import ( "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" - "github.com/alcionai/corso/src/pkg/services/m365" ) // ------------------------------------------------------------------------------------------------ @@ -44,16 +43,16 @@ corso backup create onedrive --user '*'` oneDriveServiceCommandDeleteExamples = `# Delete OneDrive backup with ID 1234abcd-12ab-cd34-56de-1234abcd corso backup delete onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd` - oneDriveServiceCommandDetailsExamples = `# Explore Alice's files from backup 1234abcd-12ab-cd34-56de-1234abcd -corso backup details onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --user alice@example.com + 
oneDriveServiceCommandDetailsExamples = `# Explore items in Bob's latest backup (1234abcd...) +corso backup details onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd -# Explore Alice or Bob's files with name containing "Fiscal 22" in folder "Reports" +# Explore files in the folder "Reports" named "Fiscal 22" corso backup details onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --user alice@example.com,bob@example.com --file-name "Fiscal 22" --folder "Reports" + --file-name "Fiscal 22" --folder "Reports" -# Explore Alice's files created before end of 2015 from a specific backup +# Explore files created before the end of 2015 corso backup details onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --user alice@example.com --file-created-before 2015-01-01T00:00:00` + --file-created-before 2015-01-01T00:00:00` ) // called by backup.go to map subcommands to provider-specific handling. @@ -135,7 +134,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { return err } - r, acct, err := getAccountAndConnect(ctx) + r, acct, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } @@ -144,10 +143,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { sel := oneDriveBackupCreateSelectors(utils.UserFV) - // TODO: log/print recoverable errors - errs := fault.New(false) - - ins, err := m365.UsersMap(ctx, *acct, errs) + ins, err := utils.UsersMap(ctx, *acct, fault.New(true)) if err != nil { return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users")) } @@ -224,7 +220,7 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error { ctx := cmd.Context() opts := utils.MakeOneDriveOpts(cmd) - r, _, err := getAccountAndConnect(ctx) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } diff --git a/src/cli/backup/onedrive_e2e_test.go b/src/cli/backup/onedrive_e2e_test.go index e3d20f5ff..73cedd2ca 100644 --- a/src/cli/backup/onedrive_e2e_test.go +++ b/src/cli/backup/onedrive_e2e_test.go @@ -16,12 +16,13 @@ import ( "github.com/alcionai/corso/src/cli/config" "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/operations" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" + selTD "github.com/alcionai/corso/src/pkg/selectors/testdata" "github.com/alcionai/corso/src/pkg/storage" ) @@ -44,9 +45,7 @@ func TestNoBackupOneDriveE2ESuite(t *testing.T) { suite.Run(t, &NoBackupOneDriveE2ESuite{ Suite: tester.NewE2ESuite( t, - [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, - ), + [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}), }) } @@ -148,9 +147,7 @@ func TestBackupDeleteOneDriveE2ESuite(t *testing.T) { suite.Run(t, &BackupDeleteOneDriveE2ESuite{ Suite: tester.NewE2ESuite( t, - [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, - ), + [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}), }) } @@ -171,17 +168,12 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() { var ( m365UserID = tester.M365UserID(t) users = []string{m365UserID} - idToName = map[string]string{m365UserID: m365UserID} - nameToID = map[string]string{m365UserID: m365UserID} - ins = common.IDsNames{ - IDToName: idToName, - NameToID: 
nameToID, - } + ins = idname.NewCache(map[string]string{m365UserID: m365UserID}) ) // some tests require an existing backup sel := selectors.NewOneDriveBackup(users) - sel.Include(sel.Folders(selectors.Any())) + sel.Include(selTD.OneDriveBackupFolderScope(sel)) backupOp, err := suite.repo.NewBackupWithLookup(ctx, sel.Selector, ins) require.NoError(t, err, clues.ToCore(err)) diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index bf3ff3c71..7f48d4c33 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -12,7 +12,7 @@ import ( "github.com/alcionai/corso/src/cli/options" . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" @@ -40,10 +40,10 @@ const ( ) const ( - sharePointServiceCommandCreateExamples = `# Backup SharePoint data for a Site -corso backup create sharepoint --site + sharePointServiceCommandCreateExamples = `# Backup SharePoint data for the HR site +corso backup create sharepoint --site https://example.com/hr -# Backup SharePoint for two sites: HR and Team +# Backup SharePoint for the HR and Team sites corso backup create sharepoint --site https://example.com/hr,https://example.com/team # Backup all SharePoint data for all Sites @@ -52,16 +52,20 @@ corso backup create sharepoint --site '*'` sharePointServiceCommandDeleteExamples = `# Delete SharePoint backup with ID 1234abcd-12ab-cd34-56de-1234abcd corso backup delete sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd` - sharePointServiceCommandDetailsExamples = `# Explore a site's files from backup 1234abcd-12ab-cd34-56de-1234abcd + sharePointServiceCommandDetailsExamples = `# Explore items in the HR site's latest backup (1234abcd...) corso backup details sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd -# Find all files that were created before a certain date. +# Explore files in the folder "Reports" named "Fiscal 22" +corso backup details sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \ + --file-name "Fiscal 22" --folder "Reports" + +# Explore files in the folder "Display Templates/Style Sheets" created before the end of 2015. corso backup details sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \ --file-created-before 2015-01-01T00:00:00 --folder "Display Templates/Style Sheets" -# Find all files within a specific library. +# Explore all files within the document library "Work Documents" corso backup details sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --library documents --folder "Display Templates/Style Sheets" + --library "Work Documents" ` ) @@ -146,7 +150,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error { return err } - r, acct, err := getAccountAndConnect(ctx) + r, acct, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } @@ -203,7 +207,7 @@ func validateSharePointBackupCreateFlags(sites, weburls, cats []string) error { // TODO: users might specify a data type, this only supports AllData(). 
func sharePointBackupCreateSelectors( ctx context.Context, - ins common.IDNameSwapper, + ins idname.Cacher, sites, weburls, cats []string, ) (*selectors.SharePointBackup, error) { if len(sites) == 0 && len(weburls) == 0 { @@ -223,7 +227,7 @@ func sharePointBackupCreateSelectors( return addCategories(sel, cats), nil } -func includeAllSitesWithCategories(ins common.IDNameSwapper, categories []string) *selectors.SharePointBackup { +func includeAllSitesWithCategories(ins idname.Cacher, categories []string) *selectors.SharePointBackup { return addCategories(selectors.NewSharePointBackup(ins.IDs()), categories) } @@ -308,7 +312,7 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error { ctx := cmd.Context() opts := utils.MakeSharePointOpts(cmd) - r, _, err := getAccountAndConnect(ctx) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } diff --git a/src/cli/backup/sharepoint_e2e_test.go b/src/cli/backup/sharepoint_e2e_test.go index 2ece84b2d..09d65d90e 100644 --- a/src/cli/backup/sharepoint_e2e_test.go +++ b/src/cli/backup/sharepoint_e2e_test.go @@ -16,7 +16,7 @@ import ( "github.com/alcionai/corso/src/cli/config" "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/operations" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" @@ -45,7 +45,6 @@ func TestNoBackupSharePointE2ESuite(t *testing.T) { suite.Run(t, &NoBackupSharePointE2ESuite{Suite: tester.NewE2ESuite( t, [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, )}) } @@ -112,9 +111,7 @@ func TestBackupDeleteSharePointE2ESuite(t *testing.T) { suite.Run(t, &BackupDeleteSharePointE2ESuite{ Suite: tester.NewE2ESuite( t, - [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, - ), + [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}), }) } @@ -135,12 +132,7 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() { var ( m365SiteID = tester.M365SiteID(t) sites = []string{m365SiteID} - idToName = map[string]string{m365SiteID: m365SiteID} - nameToID = map[string]string{m365SiteID: m365SiteID} - ins = common.IDsNames{ - IDToName: idToName, - NameToID: nameToID, - } + ins = idname.NewCache(map[string]string{m365SiteID: m365SiteID}) ) // some tests require an existing backup diff --git a/src/cli/backup/sharepoint_test.go b/src/cli/backup/sharepoint_test.go index f040102ac..70b132897 100644 --- a/src/cli/backup/sharepoint_test.go +++ b/src/cli/backup/sharepoint_test.go @@ -12,7 +12,7 @@ import ( "github.com/alcionai/corso/src/cli/options" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/cli/utils/testdata" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -156,10 +156,7 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() { ) var ( - ins = common.IDsNames{ - IDToName: map[string]string{id1: url1, id2: url2}, - NameToID: map[string]string{url1: id1, url2: id2}, - } + ins = idname.NewCache(map[string]string{id1: url1, id2: url2}) bothIDs = []string{id1, id2} ) diff --git a/src/cli/config/account.go b/src/cli/config/account.go index 3bcd9fcd2..310ac97c3 100644 --- a/src/cli/config/account.go +++ 
b/src/cli/config/account.go @@ -6,7 +6,6 @@ import ( "github.com/alcionai/clues" "github.com/spf13/viper" - "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/credentials" @@ -72,7 +71,7 @@ func configureAccount( } // ensure required properties are present - if err := utils.RequireProps(map[string]string{ + if err := requireProps(map[string]string{ credentials.AzureClientID: m365Cfg.AzureClientID, credentials.AzureClientSecret: m365Cfg.AzureClientSecret, account.AzureTenantID: m365Cfg.AzureTenantID, diff --git a/src/cli/config/config.go b/src/cli/config/config.go index e48f6f5ce..5cdf7863c 100644 --- a/src/cli/config/config.go +++ b/src/cli/config/config.go @@ -321,3 +321,15 @@ func mustMatchConfig(vpr *viper.Viper, m map[string]string) error { return nil } + +// requireProps validates the existence of the properties +// in the map. Expects the format map[propName]propVal. +func requireProps(props map[string]string) error { + for name, val := range props { + if len(val) == 0 { + return clues.New(name + " is required to perform this command") + } + } + + return nil +} diff --git a/src/cli/config/config_test.go b/src/cli/config/config_test.go index d9b2be563..1226902bb 100644 --- a/src/cli/config/config_test.go +++ b/src/cli/config/config_test.go @@ -39,6 +39,27 @@ func TestConfigSuite(t *testing.T) { suite.Run(t, &ConfigSuite{Suite: tester.NewUnitSuite(t)}) } +func (suite *ConfigSuite) TestRequireProps() { + table := []struct { + name string + props map[string]string + errCheck assert.ErrorAssertionFunc + }{ + { + props: map[string]string{"exists": "I have seen the fnords!"}, + errCheck: assert.NoError, + }, + { + props: map[string]string{"not-exists": ""}, + errCheck: assert.Error, + }, + } + for _, test := range table { + err := requireProps(test.props) + test.errCheck(suite.T(), err, clues.ToCore(err)) + } +} + func (suite *ConfigSuite) TestReadRepoConfigBasic() { var ( t = suite.T() diff --git a/src/cli/config/storage.go b/src/cli/config/storage.go index a10c3315e..9aba1e5d9 100644 --- a/src/cli/config/storage.go +++ b/src/cli/config/storage.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/spf13/viper" - "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/pkg/credentials" "github.com/alcionai/corso/src/pkg/storage" @@ -112,7 +111,7 @@ func configureStorage( } // ensure required properties are present - if err := utils.RequireProps(map[string]string{ + if err := requireProps(map[string]string{ storage.Bucket: s3Cfg.Bucket, credentials.CorsoPassphrase: corso.CorsoPassphrase, }); err != nil { diff --git a/src/cli/options/options.go b/src/cli/options/options.go index 626ad2115..ac76b41b8 100644 --- a/src/cli/options/options.go +++ b/src/cli/options/options.go @@ -18,8 +18,10 @@ func Control() control.Options { opt.RestorePermissions = restorePermissionsFV opt.SkipReduce = skipReduceFV opt.ToggleFeatures.DisableIncrementals = disableIncrementalsFV + opt.ToggleFeatures.DisableDelta = disableDeltaFV opt.ToggleFeatures.ExchangeImmutableIDs = enableImmutableID - opt.ItemFetchParallelism = fetchParallelismFV + opt.ToggleFeatures.DisableConcurrencyLimiter = disableConcurrencyLimiterFV + opt.Parallelism.ItemFetch = fetchParallelismFV return opt } @@ -29,13 +31,15 @@ func Control() control.Options { // --------------------------------------------------------------------------- const ( 
- FailFastFN = "fail-fast" - FetchParallelismFN = "fetch-parallelism" - NoStatsFN = "no-stats" - RestorePermissionsFN = "restore-permissions" - SkipReduceFN = "skip-reduce" - DisableIncrementalsFN = "disable-incrementals" - EnableImmutableIDFN = "enable-immutable-id" + FailFastFN = "fail-fast" + FetchParallelismFN = "fetch-parallelism" + NoStatsFN = "no-stats" + RestorePermissionsFN = "restore-permissions" + SkipReduceFN = "skip-reduce" + DisableDeltaFN = "disable-delta" + DisableIncrementalsFN = "disable-incrementals" + EnableImmutableIDFN = "enable-immutable-id" + DisableConcurrencyLimiterFN = "disable-concurrency-limiter" ) var ( @@ -90,7 +94,10 @@ func AddFetchParallelismFlag(cmd *cobra.Command) { // Feature Flags // --------------------------------------------------------------------------- -var disableIncrementalsFV bool +var ( + disableIncrementalsFV bool + disableDeltaFV bool +) // Adds the hidden '--disable-incrementals' cli flag which, when set, disables // incremental backups. @@ -104,6 +111,18 @@ func AddDisableIncrementalsFlag(cmd *cobra.Command) { cobra.CheckErr(fs.MarkHidden(DisableIncrementalsFN)) } +// Adds the hidden '--disable-delta' cli flag which, when set, disables +// delta based backups. +func AddDisableDeltaFlag(cmd *cobra.Command) { + fs := cmd.Flags() + fs.BoolVar( + &disableDeltaFV, + DisableDeltaFN, + false, + "Disable delta based data retrieval in backups.") + cobra.CheckErr(fs.MarkHidden(DisableDeltaFN)) +} + var enableImmutableID bool // Adds the hidden '--enable-immutable-id' cli flag which, when set, enables @@ -117,3 +136,18 @@ func AddEnableImmutableIDFlag(cmd *cobra.Command) { "Enable exchange immutable ID.") cobra.CheckErr(fs.MarkHidden(EnableImmutableIDFN)) } + +var disableConcurrencyLimiterFV bool + +// AddDisableConcurrencyLimiterFlag adds a hidden cli flag which, when set, +// removes concurrency limits when communicating with graph API. This +// flag is only relevant for exchange backups for now +func AddDisableConcurrencyLimiterFlag(cmd *cobra.Command) { + fs := cmd.Flags() + fs.BoolVar( + &disableConcurrencyLimiterFV, + DisableConcurrencyLimiterFN, + false, + "Disable concurrency limiter middleware. 
Default: false") + cobra.CheckErr(fs.MarkHidden(DisableConcurrencyLimiterFN)) +} diff --git a/src/cli/options/options_test.go b/src/cli/options/options_test.go index ae229396e..8538e3441 100644 --- a/src/cli/options/options_test.go +++ b/src/cli/options/options_test.go @@ -28,10 +28,12 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() { Run: func(cmd *cobra.Command, args []string) { assert.True(t, failFastFV, FailFastFN) assert.True(t, disableIncrementalsFV, DisableIncrementalsFN) + assert.True(t, disableDeltaFV, DisableDeltaFN) assert.True(t, noStatsFV, NoStatsFN) assert.True(t, restorePermissionsFV, RestorePermissionsFN) assert.True(t, skipReduceFV, SkipReduceFN) assert.Equal(t, 2, fetchParallelismFV, FetchParallelismFN) + assert.True(t, disableConcurrencyLimiterFV, DisableConcurrencyLimiterFN) }, } @@ -40,21 +42,23 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() { AddFailFastFlag(cmd) AddDisableIncrementalsFlag(cmd) + AddDisableDeltaFlag(cmd) AddRestorePermissionsFlag(cmd) AddSkipReduceFlag(cmd) - AddFetchParallelismFlag(cmd) + AddDisableConcurrencyLimiterFlag(cmd) // Test arg parsing for few args cmd.SetArgs([]string{ "test", "--" + FailFastFN, "--" + DisableIncrementalsFN, + "--" + DisableDeltaFN, "--" + NoStatsFN, "--" + RestorePermissionsFN, "--" + SkipReduceFN, - "--" + FetchParallelismFN, "2", + "--" + DisableConcurrencyLimiterFN, }) err := cmd.Execute() diff --git a/src/cli/print/print.go b/src/cli/print/print.go index 5ab61acca..91ef1e581 100644 --- a/src/cli/print/print.go +++ b/src/cli/print/print.go @@ -50,8 +50,8 @@ func AddOutputFlag(cmd *cobra.Command) { cobra.CheckErr(fs.MarkHidden("json-debug")) } -// JSONFormat returns true if the printer plans to output as json. -func JSONFormat() bool { +// DisplayJSONFormat returns true if the printer plans to output as json. +func DisplayJSONFormat() bool { return outputAsJSON || outputAsJSONDebug } diff --git a/src/cli/repo/repo.go b/src/cli/repo/repo.go index d60f9265a..5f768cb8b 100644 --- a/src/cli/repo/repo.go +++ b/src/cli/repo/repo.go @@ -1,12 +1,21 @@ package repo import ( + "strings" + + "github.com/alcionai/clues" "github.com/spf13/cobra" + "golang.org/x/exp/maps" + + "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/pkg/control/repository" ) const ( - initCommand = "init" - connectCommand = "connect" + initCommand = "init" + connectCommand = "connect" + maintenanceCommand = "maintenance" ) var repoCommands = []func(cmd *cobra.Command) *cobra.Command{ @@ -18,15 +27,24 @@ func AddCommands(cmd *cobra.Command) { var ( // Get new instances so that setting the context during tests works // properly. 
- repoCmd = repoCmd() - initCmd = initCmd() - connectCmd = connectCmd() + repoCmd = repoCmd() + initCmd = initCmd() + connectCmd = connectCmd() + maintenanceCmd = maintenanceCmd() ) cmd.AddCommand(repoCmd) repoCmd.AddCommand(initCmd) repoCmd.AddCommand(connectCmd) + utils.AddCommand( + repoCmd, + maintenanceCmd, + utils.HideCommand(), + utils.MarkPreReleaseCommand()) + utils.AddMaintenanceModeFlag(maintenanceCmd) + utils.AddForceMaintenanceFlag(maintenanceCmd) + for _, addRepoTo := range repoCommands { addRepoTo(initCmd) addRepoTo(connectCmd) @@ -84,3 +102,65 @@ func connectCmd() *cobra.Command { func handleConnectCmd(cmd *cobra.Command, args []string) error { return cmd.Help() } + +func maintenanceCmd() *cobra.Command { + return &cobra.Command{ + Use: maintenanceCommand, + Short: "Run maintenance on an existing repository", + Long: `Run maintenance on an existing repository to optimize performance and storage use`, + RunE: handleMaintenanceCmd, + Args: cobra.NoArgs, + } +} + +func handleMaintenanceCmd(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + t, err := getMaintenanceType(utils.MaintenanceModeFV) + if err != nil { + return err + } + + r, _, err := utils.GetAccountAndConnect(ctx) + if err != nil { + return print.Only(ctx, err) + } + + defer utils.CloseRepo(ctx, r) + + m, err := r.NewMaintenance( + ctx, + repository.Maintenance{ + Type: t, + Safety: repository.FullMaintenanceSafety, + Force: utils.ForceMaintenanceFV, + }) + if err != nil { + return print.Only(ctx, err) + } + + err = m.Run(ctx) + if err != nil { + return print.Only(ctx, err) + } + + return nil +} + +func getMaintenanceType(t string) (repository.MaintenanceType, error) { + res, ok := repository.StringToMaintenanceType[t] + if !ok { + modes := maps.Keys(repository.StringToMaintenanceType) + allButLast := []string{} + + for i := 0; i < len(modes)-1; i++ { + allButLast = append(allButLast, string(modes[i])) + } + + valuesStr := strings.Join(allButLast, ", ") + " or " + string(modes[len(modes)-1]) + + return res, clues.New(t + " is an unrecognized maintenance mode; must be one of " + valuesStr) + } + + return res, nil +} diff --git a/src/cli/repo/repo_test.go b/src/cli/repo/repo_test.go new file mode 100644 index 000000000..97497b719 --- /dev/null +++ b/src/cli/repo/repo_test.go @@ -0,0 +1,41 @@ +package repo + +import ( + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" +) + +type RepoUnitSuite struct { + tester.Suite +} + +func TestRepoUnitSuite(t *testing.T) { + suite.Run(t, &RepoUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *RepoUnitSuite) TestAddRepoCommands() { + t := suite.T() + cmd := &cobra.Command{} + + AddCommands(cmd) + + var found bool + + // This is the repo command. 
+ repoCmds := cmd.Commands() + require.Len(t, repoCmds, 1) + + for _, c := range repoCmds[0].Commands() { + if c.Use == maintenanceCommand { + found = true + } + } + + assert.True(t, found, "looking for maintenance command") +} diff --git a/src/cli/repo/s3_e2e_test.go b/src/cli/repo/s3_e2e_test.go index 4266be8f0..388b687e2 100644 --- a/src/cli/repo/s3_e2e_test.go +++ b/src/cli/repo/s3_e2e_test.go @@ -25,7 +25,6 @@ func TestS3E2ESuite(t *testing.T) { suite.Run(t, &S3E2ESuite{Suite: tester.NewE2ESuite( t, [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, )}) } @@ -194,7 +193,7 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { ctx = config.SetViper(ctx, vpr) // init the repo first - _, err = repository.Initialize(ctx, account.Account{}, st, control.Options{}) + _, err = repository.Initialize(ctx, account.Account{}, st, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) // then test it diff --git a/src/cli/restore/exchange.go b/src/cli/restore/exchange.go index 1fb098531..1f80958ab 100644 --- a/src/cli/restore/exchange.go +++ b/src/cli/restore/exchange.go @@ -6,14 +6,12 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/alcionai/corso/src/cli/config" "github.com/alcionai/corso/src/cli/options" . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/control" - "github.com/alcionai/corso/src/pkg/repository" ) // called by restore.go to map subcommands to provider-specific handling. @@ -46,18 +44,19 @@ const ( exchangeServiceCommand = "exchange" exchangeServiceCommandUseSuffix = "--backup " - exchangeServiceCommandRestoreExamples = `# Restore emails with ID 98765abcdef and 12345abcdef from a specific backup + //nolint:lll + exchangeServiceCommandRestoreExamples = `# Restore emails with ID 98765abcdef and 12345abcdef from Alice's last backup (1234abcd...) 
corso restore exchange --backup 1234abcd-12ab-cd34-56de-1234abcd --email 98765abcdef,12345abcdef -# Restore Alice's emails with subject containing "Hello world" in "Inbox" from a specific backup +# Restore emails with subject containing "Hello world" in the "Inbox" corso restore exchange --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --user alice@example.com --email-subject "Hello world" --email-folder Inbox + --email-subject "Hello world" --email-folder Inbox -# Restore Bobs's entire calendar from a specific backup +# Restore an entire calendar corso restore exchange --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --user bob@example.com --event-calendar Calendar + --event-calendar Calendar -# Restore contact with ID abdef0101 from a specific backup +# Restore the contact with ID abdef0101 corso restore exchange --backup 1234abcd-12ab-cd34-56de-1234abcd --contact abdef0101` ) @@ -90,19 +89,14 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error { return err } - cfg, err := config.GetConfigRepoDetails(ctx, true, nil) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } - r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, options.Control()) - if err != nil { - return Only(ctx, clues.Wrap(err, "Failed to connect to the "+cfg.Storage.Provider.String()+" repository")) - } - defer utils.CloseRepo(ctx, r) - dest := control.DefaultRestoreDestination(common.SimpleDateTime) + dest := control.DefaultRestoreDestination(dttm.HumanReadable) Infof(ctx, "Restoring to folder %s", dest.ContainerName) sel := utils.IncludeExchangeRestoreDataSelectors(opts) @@ -122,7 +116,7 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to run Exchange restore")) } - ds.PrintEntries(ctx) + ds.Items().PrintEntries(ctx) return nil } diff --git a/src/cli/restore/exchange_e2e_test.go b/src/cli/restore/exchange_e2e_test.go index 2064868e5..30114aa4f 100644 --- a/src/cli/restore/exchange_e2e_test.go +++ b/src/cli/restore/exchange_e2e_test.go @@ -13,7 +13,7 @@ import ( "github.com/alcionai/corso/src/cli" "github.com/alcionai/corso/src/cli/config" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/connector/exchange" "github.com/alcionai/corso/src/internal/operations" "github.com/alcionai/corso/src/internal/tester" @@ -48,9 +48,7 @@ func TestRestoreExchangeE2ESuite(t *testing.T) { suite.Run(t, &RestoreExchangeE2ESuite{ Suite: tester.NewE2ESuite( t, - [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}, - tester.CorsoCITests, - ), + [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}), }) } @@ -77,13 +75,8 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() { suite.m365UserID = strings.ToLower(tester.M365UserID(t)) var ( - users = []string{suite.m365UserID} - idToName = map[string]string{suite.m365UserID: suite.m365UserID} - nameToID = map[string]string{suite.m365UserID: suite.m365UserID} - ins = common.IDsNames{ - IDToName: idToName, - NameToID: nameToID, - } + users = []string{suite.m365UserID} + ins = idname.NewCache(map[string]string{suite.m365UserID: suite.m365UserID}) ) // init the repo first diff --git a/src/cli/restore/onedrive.go b/src/cli/restore/onedrive.go index 6e61e9386..a3724229e 100644 --- a/src/cli/restore/onedrive.go +++ b/src/cli/restore/onedrive.go @@ -6,14 +6,12 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - 
"github.com/alcionai/corso/src/cli/config" "github.com/alcionai/corso/src/cli/options" . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/control" - "github.com/alcionai/corso/src/pkg/repository" ) // called by restore.go to map subcommands to provider-specific handling. @@ -48,19 +46,19 @@ const ( oneDriveServiceCommand = "onedrive" oneDriveServiceCommandUseSuffix = "--backup " - oneDriveServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef + oneDriveServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef in Bob's last backup (1234abcd...) corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef -# Restore file with ID 98765abcdef along with its associated permissions +# Restore the file with ID 98765abcdef along with its associated permissions corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --restore-permissions -# Restore Alice's file named "FY2021 Planning.xlsx in "Documents/Finance Reports" from a specific backup +# Restore files named "FY2021 Planning.xlsx" in "Documents/Finance Reports" corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd \ - --user alice@example.com --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports" + --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports" -# Restore all files from Bob's folder that were created before 2020 when captured in a specific backup +# Restore all files and folders in folder "Documents/Finance Reports" that were created before 2020 corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd - --user bob@example.com --folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00` + --folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00` ) // `corso restore onedrive [...]` @@ -92,19 +90,14 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error { return err } - cfg, err := config.GetConfigRepoDetails(ctx, true, nil) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } - r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, options.Control()) - if err != nil { - return Only(ctx, clues.Wrap(err, "Failed to connect to the "+cfg.Storage.Provider.String()+" repository")) - } - defer utils.CloseRepo(ctx, r) - dest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive) + dest := control.DefaultRestoreDestination(dttm.HumanReadableDriveItem) Infof(ctx, "Restoring to folder %s", dest.ContainerName) sel := utils.IncludeOneDriveRestoreDataSelectors(opts) @@ -124,7 +117,7 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to run OneDrive restore")) } - ds.PrintEntries(ctx) + ds.Items().PrintEntries(ctx) return nil } diff --git a/src/cli/restore/sharepoint.go b/src/cli/restore/sharepoint.go index 8c0e5bfb2..63678a718 100644 --- a/src/cli/restore/sharepoint.go +++ b/src/cli/restore/sharepoint.go @@ -6,14 +6,12 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/alcionai/corso/src/cli/config" "github.com/alcionai/corso/src/cli/options" . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/control" - "github.com/alcionai/corso/src/pkg/repository" ) // called by restore.go to map subcommands to provider-specific handling. @@ -35,6 +33,8 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command { utils.AddBackupIDFlag(c, true) utils.AddSharePointDetailsAndRestoreFlags(c) + + options.AddRestorePermissionsFlag(c) options.AddFailFastFlag(c) } @@ -46,20 +46,24 @@ const ( sharePointServiceCommandUseSuffix = "--backup " //nolint:lll - sharePointServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef + sharePointServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef in Bob's latest backup (1234abcd...) corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef -# Restore a file named "ServerRenderTemplate.xsl in "Display Templates/Style Sheets". +# Restore the file with ID 98765abcdef along with its associated permissions +corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \ + --file 98765abcdef --restore-permissions + +# Restore files named "ServerRenderTemplate.xsl" in the folder "Display Templates/Style Sheets". corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \ --file "ServerRenderTemplate.xsl" --folder "Display Templates/Style Sheets" -# Restore all files that were created before 2020. +# Restore all files in the folder "Display Templates/Style Sheets" that were created before 2020. corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd --file-created-before 2020-01-01T00:00:00 --folder "Display Templates/Style Sheets" -# Restore all files in a certain library. +# Restore all files in the "Documents" library. 
corso restore sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd - --library documents --folder "Display Templates/Style Sheets" ` + --library Documents --folder "Display Templates/Style Sheets" ` ) // `corso restore sharepoint [...]` @@ -91,19 +95,14 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error { return err } - cfg, err := config.GetConfigRepoDetails(ctx, true, nil) + r, _, err := utils.GetAccountAndConnect(ctx) if err != nil { return Only(ctx, err) } - r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, options.Control()) - if err != nil { - return Only(ctx, clues.Wrap(err, "Failed to connect to the "+cfg.Storage.Provider.String()+" repository")) - } - defer utils.CloseRepo(ctx, r) - dest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive) + dest := control.DefaultRestoreDestination(dttm.HumanReadableDriveItem) Infof(ctx, "Restoring to folder %s", dest.ContainerName) sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts) @@ -123,7 +122,7 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to run SharePoint restore")) } - ds.PrintEntries(ctx) + ds.Items().PrintEntries(ctx) return nil } diff --git a/src/cli/utils/exchange.go b/src/cli/utils/exchange.go index ac3de3871..d167c710e 100644 --- a/src/cli/utils/exchange.go +++ b/src/cli/utils/exchange.go @@ -113,7 +113,7 @@ func AddExchangeDetailsAndRestoreFlags(cmd *cobra.Command) { fs.StringSliceVar( &EmailFV, EmailFN, nil, - "Select emails by email ID; accepts '"+Wildcard+"' to select all emails.") + "Select email messages by ID; accepts '"+Wildcard+"' to select all emails.") fs.StringSliceVar( &EmailFolderFV, EmailFolderFN, nil, diff --git a/src/cli/utils/exchange_test.go b/src/cli/utils/exchange_test.go index b9f7f64ca..c61e8da77 100644 --- a/src/cli/utils/exchange_test.go +++ b/src/cli/utils/exchange_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -42,7 +42,7 @@ func (suite *ExchangeUtilsSuite) TestValidateRestoreFlags() { { name: "valid time", backupID: "bid", - opts: utils.ExchangeOpts{EmailReceivedAfter: common.Now()}, + opts: utils.ExchangeOpts{EmailReceivedAfter: dttm.Now()}, expect: assert.NoError, }, { diff --git a/src/cli/utils/flags.go b/src/cli/utils/flags.go index d884fb631..ab1503034 100644 --- a/src/cli/utils/flags.go +++ b/src/cli/utils/flags.go @@ -8,8 +8,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" ) // common flag vars (eg: FV) @@ -36,6 +38,9 @@ var ( // for selection of data by category. eg: `--data email,contacts` CategoryDataFV []string + + MaintenanceModeFV string + ForceMaintenanceFV bool ) // common flag names (eg: FN) @@ -58,6 +63,10 @@ const ( FileCreatedBeforeFN = "file-created-before" FileModifiedAfterFN = "file-modified-after" FileModifiedBeforeFN = "file-modified-before" + + // Maintenance stuff. 
+ MaintenanceModeFN = "mode" + ForceMaintenanceFN = "force" ) // well-known flag values @@ -167,6 +176,30 @@ func AddSiteFlag(cmd *cobra.Command) { "Backup data by site URL; accepts '"+Wildcard+"' to select all sites.") } +func AddMaintenanceModeFlag(cmd *cobra.Command) { + fs := cmd.Flags() + fs.StringVar( + &MaintenanceModeFV, + MaintenanceModeFN, + repository.CompleteMaintenance.String(), + "Type of maintenance operation to run. Pass '"+ + repository.MetadataMaintenance.String()+"' to run a faster maintenance "+ + "that does minimal clean-up and optimization. Pass '"+ + repository.CompleteMaintenance.String()+"' to fully compact existing "+ + "data and delete unused data.") + cobra.CheckErr(fs.MarkHidden(MaintenanceModeFN)) +} + +func AddForceMaintenanceFlag(cmd *cobra.Command) { + fs := cmd.Flags() + fs.BoolVar( + &ForceMaintenanceFV, + ForceMaintenanceFN, + false, + "Force maintenance. Caution: user must ensure this is not run concurrently on a single repo") + cobra.CheckErr(fs.MarkHidden(ForceMaintenanceFN)) +} + type PopulatedFlags map[string]struct{} func (fs PopulatedFlags) populate(pf *pflag.Flag) { @@ -198,7 +231,7 @@ func GetPopulatedFlags(cmd *cobra.Command) PopulatedFlags { // IsValidTimeFormat returns true if the input is recognized as a // supported format by the common time parser. func IsValidTimeFormat(in string) bool { - _, err := common.ParseTime(in) + _, err := dttm.ParseTime(in) return err == nil } @@ -215,6 +248,11 @@ func trimFolderSlash(folders []string) []string { res := make([]string, 0, len(folders)) for _, p := range folders { + if p == string(path.PathSeparator) { + res = selectors.Any() + break + } + // Use path package because it has logic to handle escaping already. res = append(res, path.TrimTrailingSlash(p)) } diff --git a/src/cli/utils/onedrive_test.go b/src/cli/utils/onedrive_test.go index 43c0507c0..61653045f 100644 --- a/src/cli/utils/onedrive_test.go +++ b/src/cli/utils/onedrive_test.go @@ -8,6 +8,7 @@ import ( "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/path" ) type OneDriveUtilsSuite struct { @@ -26,6 +27,7 @@ func (suite *OneDriveUtilsSuite) TestIncludeOneDriveRestoreDataSelectors() { containsOnly = []string{"contains"} prefixOnly = []string{"/prefix"} containsAndPrefix = []string{"contains", "/prefix"} + onlySlash = []string{string(path.PathSeparator)} ) table := []struct { @@ -87,6 +89,15 @@ func (suite *OneDriveUtilsSuite) TestIncludeOneDriveRestoreDataSelectors() { }, expectIncludeLen: 2, }, + { + name: "folder with just /", + opts: utils.OneDriveOpts{ + Users: empty, + FileName: empty, + FolderPath: onlySlash, + }, + expectIncludeLen: 1, + }, } for _, test := range table { suite.Run(test.name, func() { diff --git a/src/cli/utils/sharepoint_test.go b/src/cli/utils/sharepoint_test.go index 0a14f435e..0201ab29e 100644 --- a/src/cli/utils/sharepoint_test.go +++ b/src/cli/utils/sharepoint_test.go @@ -7,8 +7,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -30,6 +31,7 @@ func (suite *SharePointUtilsSuite) TestIncludeSharePointRestoreDataSelectors() { containsOnly = []string{"contains"} prefixOnly = []string{"/prefix"} containsAndPrefix = []string{"contains", "/prefix"} + onlySlash 
= []string{string(path.PathSeparator)} ) table := []struct { @@ -182,6 +184,13 @@ func (suite *SharePointUtilsSuite) TestIncludeSharePointRestoreDataSelectors() { }, expectIncludeLen: 2, }, + { + name: "folder with just /", + opts: utils.SharePointOpts{ + FolderPath: onlySlash, + }, + expectIncludeLen: 1, + }, } for _, test := range table { suite.Run(test.name, func() { @@ -280,10 +289,10 @@ func (suite *SharePointUtilsSuite) TestValidateSharePointRestoreFlags() { backupID: "id", opts: utils.SharePointOpts{ WebURL: []string{"www.corsobackup.io/sites/foo"}, - FileCreatedAfter: common.Now(), - FileCreatedBefore: common.Now(), - FileModifiedAfter: common.Now(), - FileModifiedBefore: common.Now(), + FileCreatedAfter: dttm.Now(), + FileCreatedBefore: dttm.Now(), + FileModifiedAfter: dttm.Now(), + FileModifiedBefore: dttm.Now(), Populated: utils.PopulatedFlags{ utils.SiteFN: {}, utils.FileCreatedAfterFN: {}, diff --git a/src/cli/utils/testdata/flags.go b/src/cli/utils/testdata/flags.go index 1048a4e31..25e516b4d 100644 --- a/src/cli/utils/testdata/flags.go +++ b/src/cli/utils/testdata/flags.go @@ -43,4 +43,6 @@ var ( PageFolderInput = []string{"pageFolder1", "pageFolder2"} PageInput = []string{"page1", "page2"} + + RestorePermissions = true ) diff --git a/src/cli/utils/testdata/opts.go b/src/cli/utils/testdata/opts.go index 9511f58ab..8bbb35a58 100644 --- a/src/cli/utils/testdata/opts.go +++ b/src/cli/utils/testdata/opts.go @@ -7,7 +7,7 @@ import ( "github.com/alcionai/clues" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details/testdata" @@ -21,7 +21,7 @@ type ExchangeOptionsTest struct { Name string Opts utils.ExchangeOpts BackupGetter *MockBackupGetter - Expected []details.DetailsEntry + Expected []details.Entry } var ( @@ -138,39 +138,39 @@ var ( Name: "EmailsFolderPrefixMatch", Expected: testdata.ExchangeEmailItems, Opts: utils.ExchangeOpts{ - EmailFolder: []string{testdata.ExchangeEmailInboxPath.Folder(false)}, + EmailFolder: []string{testdata.ExchangeEmailInboxPath.FolderLocation()}, }, }, { Name: "EmailsFolderPrefixMatchTrailingSlash", Expected: testdata.ExchangeEmailItems, Opts: utils.ExchangeOpts{ - EmailFolder: []string{testdata.ExchangeEmailInboxPath.Folder(false) + "/"}, + EmailFolder: []string{testdata.ExchangeEmailInboxPath.FolderLocation() + "/"}, }, }, { Name: "EmailsFolderWithSlashPrefixMatch", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.ExchangeEmailItems[1], testdata.ExchangeEmailItems[2], }, Opts: utils.ExchangeOpts{ - EmailFolder: []string{testdata.ExchangeEmailBasePath2.Folder(false)}, + EmailFolder: []string{testdata.ExchangeEmailBasePath2.FolderLocation()}, }, }, { Name: "EmailsFolderWithSlashPrefixMatchTrailingSlash", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.ExchangeEmailItems[1], testdata.ExchangeEmailItems[2], }, Opts: utils.ExchangeOpts{ - EmailFolder: []string{testdata.ExchangeEmailBasePath2.Folder(false) + "/"}, + EmailFolder: []string{testdata.ExchangeEmailBasePath2.FolderLocation() + "/"}, }, }, { Name: "EmailsBySubject", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.ExchangeEmailItems[0], testdata.ExchangeEmailItems[1], }, @@ -183,7 +183,7 @@ var ( Expected: append( append( append( - []details.DetailsEntry{}, + []details.Entry{}, 
testdata.ExchangeEmailItems..., ), testdata.ExchangeContactsItems..., @@ -193,41 +193,43 @@ var ( }, { Name: "MailReceivedTime", - Expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + Expected: []details.Entry{testdata.ExchangeEmailItems[0]}, Opts: utils.ExchangeOpts{ - EmailReceivedBefore: common.FormatTime(testdata.Time1.Add(time.Second)), + EmailReceivedBefore: dttm.Format(testdata.Time1.Add(time.Second)), }, }, { - Name: "MailItemRef", - Expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + Name: "MailShortRef", + Expected: []details.Entry{testdata.ExchangeEmailItems[0]}, + Opts: utils.ExchangeOpts{ + Email: []string{testdata.ExchangeEmailItemPath1.RR.ShortRef()}, + }, + }, + { + Name: "BadMailItemRef", + // no matches are expected, since exchange ItemRefs + // are not matched when using the CLI's selectors. + Expected: []details.Entry{}, Opts: utils.ExchangeOpts{ Email: []string{testdata.ExchangeEmailItems[0].ItemRef}, }, }, - { - Name: "MailShortRef", - Expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, - Opts: utils.ExchangeOpts{ - Email: []string{testdata.ExchangeEmailItemPath1.ShortRef()}, - }, - }, { Name: "MultipleMailShortRef", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.ExchangeEmailItems[0], testdata.ExchangeEmailItems[1], }, Opts: utils.ExchangeOpts{ Email: []string{ - testdata.ExchangeEmailItemPath1.ShortRef(), - testdata.ExchangeEmailItemPath2.ShortRef(), + testdata.ExchangeEmailItemPath1.RR.ShortRef(), + testdata.ExchangeEmailItemPath2.RR.ShortRef(), }, }, }, { Name: "AllEventsAndMailWithSubject", - Expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + Expected: []details.Entry{testdata.ExchangeEmailItems[0]}, Opts: utils.ExchangeOpts{ EmailSubject: "foo", Event: selectors.Any(), @@ -235,7 +237,7 @@ var ( }, { Name: "EventsAndMailWithSubject", - Expected: []details.DetailsEntry{}, + Expected: []details.Entry{}, Opts: utils.ExchangeOpts{ EmailSubject: "foo", EventSubject: "foo", @@ -243,13 +245,13 @@ var ( }, { Name: "EventsAndMailByShortRef", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.ExchangeEmailItems[0], testdata.ExchangeEventsItems[0], }, Opts: utils.ExchangeOpts{ - Email: []string{testdata.ExchangeEmailItemPath1.ShortRef()}, - Event: []string{testdata.ExchangeEventsItemPath1.ShortRef()}, + Email: []string{testdata.ExchangeEmailItemPath1.RR.ShortRef()}, + Event: []string{testdata.ExchangeEventsItemPath1.RR.ShortRef()}, }, }, } @@ -259,7 +261,7 @@ type OneDriveOptionsTest struct { Name string Opts utils.OneDriveOpts BackupGetter *MockBackupGetter - Expected []details.DetailsEntry + Expected []details.Entry } var ( @@ -354,6 +356,13 @@ var ( FolderPath: selectors.Any(), }, }, + { + Name: "FilesWithSingleSlash", + Expected: testdata.OneDriveItems, + Opts: utils.OneDriveOpts{ + FolderPath: []string{"/"}, + }, + }, { Name: "FolderPrefixMatch", Expected: testdata.OneDriveItems, @@ -375,9 +384,16 @@ var ( FolderPath: []string{testdata.OneDriveFolderFolder + "/"}, }, }, + { + Name: "FolderRepoRefMatchesNothing", + Expected: []details.Entry{}, + Opts: utils.OneDriveOpts{ + FolderPath: []string{testdata.OneDriveFolderPath.RR.Folder(true)}, + }, + }, { Name: "ShortRef", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.OneDriveItems[0], testdata.OneDriveItems[1], }, @@ -390,7 +406,7 @@ var ( }, { Name: "SingleItem", - Expected: []details.DetailsEntry{testdata.OneDriveItems[0]}, + Expected: []details.Entry{testdata.OneDriveItems[0]}, Opts: 
utils.OneDriveOpts{ FileName: []string{ testdata.OneDriveItems[0].OneDrive.ItemName, @@ -399,7 +415,7 @@ var ( }, { Name: "MultipleItems", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.OneDriveItems[0], testdata.OneDriveItems[1], }, @@ -412,7 +428,7 @@ var ( }, { Name: "ItemRefMatchesNothing", - Expected: []details.DetailsEntry{}, + Expected: []details.Entry{}, Opts: utils.OneDriveOpts{ FileName: []string{ testdata.OneDriveItems[0].ItemRef, @@ -421,9 +437,9 @@ var ( }, { Name: "CreatedBefore", - Expected: []details.DetailsEntry{testdata.OneDriveItems[1]}, + Expected: []details.Entry{testdata.OneDriveItems[1]}, Opts: utils.OneDriveOpts{ - FileCreatedBefore: common.FormatTime(testdata.Time1.Add(time.Second)), + FileCreatedBefore: dttm.Format(testdata.Time1.Add(time.Second)), }, }, } @@ -433,7 +449,7 @@ type SharePointOptionsTest struct { Name string Opts utils.SharePointOpts BackupGetter *MockBackupGetter - Expected []details.DetailsEntry + Expected []details.Entry } var ( @@ -473,6 +489,13 @@ var ( FolderPath: selectors.Any(), }, }, + { + Name: "LibraryItemsWithSingleSlash", + Expected: testdata.SharePointLibraryItems, + Opts: utils.SharePointOpts{ + FolderPath: []string{"/"}, + }, + }, { Name: "FolderPrefixMatch", Expected: testdata.SharePointLibraryItems, @@ -494,9 +517,16 @@ var ( FolderPath: []string{testdata.SharePointLibraryFolder + "/"}, }, }, + { + Name: "FolderRepoRefMatchesNothing", + Expected: []details.Entry{}, + Opts: utils.SharePointOpts{ + FolderPath: []string{testdata.SharePointLibraryPath.RR.Folder(true)}, + }, + }, { Name: "ShortRef", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.SharePointLibraryItems[0], testdata.SharePointLibraryItems[1], }, @@ -509,7 +539,7 @@ var ( }, { Name: "SingleItem", - Expected: []details.DetailsEntry{testdata.SharePointLibraryItems[0]}, + Expected: []details.Entry{testdata.SharePointLibraryItems[0]}, Opts: utils.SharePointOpts{ FileName: []string{ testdata.SharePointLibraryItems[0].SharePoint.ItemName, @@ -518,7 +548,7 @@ var ( }, { Name: "MultipleItems", - Expected: []details.DetailsEntry{ + Expected: []details.Entry{ testdata.SharePointLibraryItems[0], testdata.SharePointLibraryItems[1], }, @@ -531,7 +561,7 @@ var ( }, { Name: "ItemRefMatchesNothing", - Expected: []details.DetailsEntry{}, + Expected: []details.Entry{}, Opts: utils.SharePointOpts{ FileName: []string{ testdata.SharePointLibraryItems[0].ItemRef, @@ -542,7 +572,7 @@ var ( // Name: "CreatedBefore", // Expected: []details.DetailsEntry{testdata.SharePointLibraryItems[1]}, // Opts: utils.SharePointOpts{ - // FileCreatedBefore: common.FormatTime(testdata.Time1.Add(time.Second)), + // FileCreatedBefore: dttm.Format(testdata.Time1.Add(time.Second)), // }, // }, } diff --git a/src/cli/utils/users.go b/src/cli/utils/users.go new file mode 100644 index 000000000..610f0e2c6 --- /dev/null +++ b/src/cli/utils/users.go @@ -0,0 +1,40 @@ +package utils + +import ( + "context" + + "github.com/alcionai/clues" + + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +// UsersMap retrieves all users in the tenant and returns them in an idname.Cacher +func UsersMap( + ctx context.Context, + acct account.Account, + errs *fault.Bus, +) (idname.Cacher, error) { + au, err := makeUserAPI(acct) + if err != nil { + return nil, clues.Wrap(err, "constructing a graph client") + } + + return 
au.GetAllIDsAndNames(ctx, errs) +} + +func makeUserAPI(acct account.Account) (api.Users, error) { + creds, err := acct.M365Config() + if err != nil { + return api.Users{}, clues.Wrap(err, "getting m365 account creds") + } + + cli, err := api.NewClient(creds) + if err != nil { + return api.Users{}, clues.Wrap(err, "constructing api client") + } + + return cli.Users(), nil +} diff --git a/src/cli/utils/utils.go b/src/cli/utils/utils.go index 6be59d367..b41e7703f 100644 --- a/src/cli/utils/utils.go +++ b/src/cli/utils/utils.go @@ -8,7 +8,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/alcionai/corso/src/cli/config" + "github.com/alcionai/corso/src/cli/options" "github.com/alcionai/corso/src/internal/events" + "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" @@ -21,16 +24,18 @@ const ( Wildcard = "*" ) -// RequireProps validates the existence of the properties -// in the map. Expects the format map[propName]propVal. -func RequireProps(props map[string]string) error { - for name, val := range props { - if len(val) == 0 { - return clues.New(name + " is required to perform this command") - } +func GetAccountAndConnect(ctx context.Context) (repository.Repository, *account.Account, error) { + cfg, err := config.GetConfigRepoDetails(ctx, true, nil) + if err != nil { + return nil, nil, err } - return nil + r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, options.Control()) + if err != nil { + return nil, nil, clues.Wrap(err, "Failed to connect to the "+cfg.Storage.Provider.String()+" repository") + } + + return r, &cfg.Account, nil } // CloseRepo handles closing a repo. diff --git a/src/cli/utils/utils_test.go b/src/cli/utils/utils_test.go index f942e61f3..e6f5340d4 100644 --- a/src/cli/utils/utils_test.go +++ b/src/cli/utils/utils_test.go @@ -3,7 +3,6 @@ package utils import ( "testing" - "github.com/alcionai/clues" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" @@ -19,27 +18,6 @@ func TestCliUtilsSuite(t *testing.T) { suite.Run(t, &CliUtilsSuite{Suite: tester.NewUnitSuite(t)}) } -func (suite *CliUtilsSuite) TestRequireProps() { - table := []struct { - name string - props map[string]string - errCheck assert.ErrorAssertionFunc - }{ - { - props: map[string]string{"exists": "I have seen the fnords!"}, - errCheck: assert.NoError, - }, - { - props: map[string]string{"not-exists": ""}, - errCheck: assert.Error, - }, - } - for _, test := range table { - err := RequireProps(test.props) - test.errCheck(suite.T(), err, clues.ToCore(err)) - } -} - func (suite *CliUtilsSuite) TestSplitFoldersIntoContainsAndPrefix() { table := []struct { name string diff --git a/src/cmd/factory/factory.go b/src/cmd/factory/factory.go index b174702c8..e7201fc50 100644 --- a/src/cmd/factory/factory.go +++ b/src/cmd/factory/factory.go @@ -8,6 +8,7 @@ import ( . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cmd/factory/impl" + "github.com/alcionai/corso/src/internal/common/crash" "github.com/alcionai/corso/src/pkg/logger" ) @@ -29,21 +30,38 @@ var oneDriveCmd = &cobra.Command{ RunE: handleOneDriveFactory, } +var sharePointCmd = &cobra.Command{ + Use: "sharepoint", + Short: "Generate shareopint data", + RunE: handleSharePointFactory, +} + // ------------------------------------------------------------------------------------------ // CLI command handlers // ------------------------------------------------------------------------------------------ func main() { - ctx, _ := logger.SeedLevel(context.Background(), logger.Development) + ls := logger.Settings{ + Level: logger.LLDebug, + Format: logger.LFText, + } + ctx, _ := logger.CtxOrSeed(context.Background(), ls) ctx = SetRootCmd(ctx, factoryCmd) - defer logger.Flush(ctx) + defer func() { + if err := crash.Recovery(ctx, recover(), "backup"); err != nil { + logger.CtxErr(ctx, err).Error("panic in factory") + } + + logger.Flush(ctx) + }() // persistent flags that are common to all use cases fs := factoryCmd.PersistentFlags() fs.StringVar(&impl.Tenant, "tenant", "", "m365 tenant containing the user") + fs.StringVar(&impl.Site, "site", "", "sharepoint site owning the new data") fs.StringVar(&impl.User, "user", "", "m365 user owning the new data") - cobra.CheckErr(factoryCmd.MarkPersistentFlagRequired("user")) + fs.StringVar(&impl.SecondaryUser, "secondaryuser", "", "m365 secondary user owning the new data") fs.IntVar(&impl.Count, "count", 0, "count of items to produce") cobra.CheckErr(factoryCmd.MarkPersistentFlagRequired("count")) fs.StringVar(&impl.Destination, "destination", "", "destination of the new data (will create as needed)") @@ -53,6 +71,8 @@ func main() { impl.AddExchangeCommands(exchangeCmd) factoryCmd.AddCommand(oneDriveCmd) impl.AddOneDriveCommands(oneDriveCmd) + factoryCmd.AddCommand(sharePointCmd) + impl.AddSharePointCommands(sharePointCmd) if err := factoryCmd.ExecuteContext(ctx); err != nil { logger.Flush(ctx) @@ -74,3 +94,8 @@ func handleOneDriveFactory(cmd *cobra.Command, args []string) error { Err(cmd.Context(), impl.ErrNotYetImplemented) return cmd.Help() } + +func handleSharePointFactory(cmd *cobra.Command, args []string) error { + Err(cmd.Context(), impl.ErrNotYetImplemented) + return cmd.Help() +} diff --git a/src/cmd/factory/impl/common.go b/src/cmd/factory/impl/common.go index 60b524834..c267a828a 100644 --- a/src/cmd/factory/impl/common.go +++ b/src/cmd/factory/impl/common.go @@ -2,6 +2,7 @@ package impl import ( "context" + "fmt" "os" "strings" "time" @@ -11,9 +12,13 @@ import ( "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" @@ -22,14 +27,15 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" - "github.com/alcionai/corso/src/pkg/services/m365" ) var ( - Count int - Destination string - Tenant 
string - User string + Count int + Destination string + Site string + Tenant string + User string + SecondaryUser string ) // TODO: ErrGenerating = clues.New("not all items were successfully generated") @@ -59,8 +65,8 @@ func generateAndRestoreItems( for i := 0; i < howMany; i++ { var ( - now = common.Now() - nowLegacy = common.FormatLegacyTime(time.Now()) + now = dttm.Now() + nowLegacy = dttm.FormatToLegacy(time.Now()) id = uuid.NewString() subject = "automated " + now[:16] + " - " + id[:8] body = "automated " + cat.String() + " generation for " + userID + " at " + now + " - " + id @@ -73,13 +79,12 @@ func generateAndRestoreItems( } collections := []collection{{ - pathElements: []string{destFldr}, + PathElements: []string{destFldr}, category: cat, items: items, }} - // TODO: fit the destination to the containers - dest := control.DefaultRestoreDestination(common.SimpleTimeTesting) + dest := control.DefaultRestoreDestination(dttm.SafeForTesting) dest.ContainerName = destFldr print.Infof(ctx, "Restoring to folder %s", dest.ContainerName) @@ -101,7 +106,16 @@ func generateAndRestoreItems( // Common Helpers // ------------------------------------------------------------------------------------------ -func getGCAndVerifyUser(ctx context.Context, userID string) (*connector.GraphConnector, account.Account, error) { +func getGCAndVerifyResourceOwner( + ctx context.Context, + resource connector.Resource, + resourceOwner string, +) ( + *connector.GraphConnector, + account.Account, + idname.Provider, + error, +) { tid := common.First(Tenant, os.Getenv(account.AzureTenantID)) if len(Tenant) == 0 { @@ -116,34 +130,20 @@ func getGCAndVerifyUser(ctx context.Context, userID string) (*connector.GraphCon acct, err := account.NewAccount(account.ProviderM365, m365Cfg) if err != nil { - return nil, account.Account{}, clues.Wrap(err, "finding m365 account details") + return nil, account.Account{}, nil, clues.Wrap(err, "finding m365 account details") } - // TODO: log/print recoverable errors - errs := fault.New(false) - - ins, err := m365.UsersMap(ctx, acct, errs) + gc, err := connector.NewGraphConnector(ctx, acct, resource) if err != nil { - return nil, account.Account{}, clues.Wrap(err, "getting tenant users") + return nil, account.Account{}, nil, clues.Wrap(err, "connecting to graph api") } - _, idOK := ins.NameOf(strings.ToLower(userID)) - _, nameOK := ins.IDOf(strings.ToLower(userID)) - - if !idOK && !nameOK { - return nil, account.Account{}, clues.New("user not found within tenant") - } - - gc, err := connector.NewGraphConnector( - ctx, - acct, - connector.Users, - errs) + id, _, err := gc.PopulateOwnerIDAndNamesFrom(ctx, resourceOwner, nil) if err != nil { - return nil, account.Account{}, clues.Wrap(err, "connecting to graph api") + return nil, account.Account{}, nil, clues.Wrap(err, "verifying user") } - return gc, acct, nil + return gc, acct, gc.IDNameLookup.ProviderForID(id), nil } type item struct { @@ -156,7 +156,7 @@ type collection struct { // only contain elements after the prefix that corso uses for the path. For // example, a collection for the Inbox folder in exchange mail would just be // "Inbox". - pathElements []string + PathElements []string category path.CategoryType items []item } @@ -176,7 +176,7 @@ func buildCollections( service, c.category, false, - c.pathElements...) + c.PathElements...) 
if err != nil { return nil, err } @@ -193,3 +193,219 @@ func buildCollections( return collections, nil } + +var ( + folderAName = "folder-a" + folderBName = "b" + folderCName = "folder-c" + + fileAData = []byte(strings.Repeat("a", 33)) + fileBData = []byte(strings.Repeat("b", 65)) + fileEData = []byte(strings.Repeat("e", 257)) + + // Cannot restore owner or empty permissions and so not testing them + writePerm = []string{"write"} + readPerm = []string{"read"} +) + +func generateAndRestoreDriveItems( + gc *connector.GraphConnector, + resourceOwner, secondaryUserID, secondaryUserName string, + acct account.Account, + service path.ServiceType, + cat path.CategoryType, + sel selectors.Selector, + tenantID, destFldr string, + count int, + errs *fault.Bus, +) ( + *details.Details, + error, +) { + ctx, flush := tester.NewContext() + defer flush() + + dest := control.DefaultRestoreDestination(dttm.SafeForTesting) + dest.ContainerName = destFldr + print.Infof(ctx, "Restoring to folder %s", dest.ContainerName) + + var driveID string + + switch service { + case path.SharePointService: + d, err := gc.Service.Client().Sites().BySiteId(resourceOwner).Drive().Get(ctx, nil) + if err != nil { + return nil, clues.Wrap(err, "getting site's default drive") + } + + driveID = ptr.Val(d.GetId()) + default: + d, err := gc.Service.Client().Users().ByUserId(resourceOwner).Drive().Get(ctx, nil) + if err != nil { + return nil, clues.Wrap(err, "getting user's default drive") + } + + driveID = ptr.Val(d.GetId()) + } + + var ( + cols []connector.OnedriveColInfo + + rootPath = []string{"drives", driveID, "root:"} + folderAPath = []string{"drives", driveID, "root:", folderAName} + folderBPath = []string{"drives", driveID, "root:", folderBName} + folderCPath = []string{"drives", driveID, "root:", folderCName} + + now = time.Now() + year, mnth, date = now.Date() + hour, min, sec = now.Clock() + currentTime = fmt.Sprintf("%d-%v-%d-%d-%d-%d", year, mnth, date, hour, min, sec) + ) + + for i := 0; i < count; i++ { + col := []connector.OnedriveColInfo{ + // basic folder and file creation + { + PathElements: rootPath, + Files: []connector.ItemData{ + { + Name: fmt.Sprintf("file-1st-count-%d-at-%s", i, currentTime), + Data: fileAData, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, + }, + }, + { + Name: fmt.Sprintf("file-2nd-count-%d-at-%s", i, currentTime), + Data: fileBData, + }, + }, + Folders: []connector.ItemData{ + { + Name: folderBName, + }, + { + Name: folderAName, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, + }, + }, + { + Name: folderCName, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, + }, + }, + }, + }, + { + // a folder that has permissions with an item in the folder with + // the different permissions. + PathElements: folderAPath, + Files: []connector.ItemData{ + { + Name: fmt.Sprintf("file-count-%d-at-%s", i, currentTime), + Data: fileEData, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, + }, + }, + }, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, + }, + }, + { + // a folder that has permissions with an item in the folder with + // no permissions. 
+ PathElements: folderCPath, + Files: []connector.ItemData{ + { + Name: fmt.Sprintf("file-count-%d-at-%s", i, currentTime), + Data: fileAData, + }, + }, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, + }, + }, + { + PathElements: folderBPath, + Files: []connector.ItemData{ + { + // restoring a file in a non-root folder that doesn't inherit + // permissions. + Name: fmt.Sprintf("file-count-%d-at-%s", i, currentTime), + Data: fileBData, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, + }, + }, + }, + Folders: []connector.ItemData{ + { + Name: folderAName, + Perms: connector.PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, + }, + }, + }, + }, + } + + cols = append(cols, col...) + } + + input, err := connector.DataForInfo(service, cols, version.Backup) + if err != nil { + return nil, err + } + + // collections := getCollections( + // service, + // tenantID, + // []string{resourceOwner}, + // input, + // version.Backup) + + opts := control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{}, + } + + config := connector.ConfigInfo{ + Acct: acct, + Opts: opts, + Resource: connector.Users, + Service: service, + Tenant: tenantID, + ResourceOwners: []string{resourceOwner}, + Dest: tester.DefaultTestRestoreDestination(""), + } + + _, _, collections, _, err := connector.GetCollectionsAndExpected( + config, + input, + version.Backup) + if err != nil { + return nil, err + } + + return gc.ConsumeRestoreCollections(ctx, version.Backup, acct, sel, dest, opts, collections, errs) +} diff --git a/src/cmd/factory/impl/exchange.go b/src/cmd/factory/impl/exchange.go index a28fe3389..7027e60db 100644 --- a/src/cmd/factory/impl/exchange.go +++ b/src/cmd/factory/impl/exchange.go @@ -5,6 +5,7 @@ import ( . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/internal/connector" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" @@ -51,7 +52,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error { return nil } - gc, acct, err := getGCAndVerifyUser(ctx, User) + gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User) if err != nil { return Only(ctx, err) } @@ -71,7 +72,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error { subject, body, body, now, now, now, now) }, - control.Options{}, + control.Defaults(), errs) if err != nil { return Only(ctx, err) @@ -98,7 +99,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error return nil } - gc, acct, err := getGCAndVerifyUser(ctx, User) + gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User) if err != nil { return Only(ctx, err) } @@ -117,7 +118,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error User, subject, body, body, now, now, exchMock.NoRecurrence, exchMock.NoAttendees, false) }, - control.Options{}, + control.Defaults(), errs) if err != nil { return Only(ctx, err) @@ -144,7 +145,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error { return nil } - gc, acct, err := getGCAndVerifyUser(ctx, User) + gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User) if err != nil { return Only(ctx, err) } @@ -168,7 +169,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error { "123-456-7890", ) }, - control.Options{}, + control.Defaults(), errs) if err != nil { return Only(ctx, err) diff --git a/src/cmd/factory/impl/onedrive.go b/src/cmd/factory/impl/onedrive.go index c10fe7af2..62ebcc71a 100644 --- a/src/cmd/factory/impl/onedrive.go +++ b/src/cmd/factory/impl/onedrive.go @@ -1,28 +1,71 @@ package impl import ( + "strings" + "github.com/spf13/cobra" . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/internal/connector" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" ) -var filesCmd = &cobra.Command{ +var odFilesCmd = &cobra.Command{ Use: "files", Short: "Generate OneDrive files", RunE: handleOneDriveFileFactory, } func AddOneDriveCommands(cmd *cobra.Command) { - cmd.AddCommand(filesCmd) + cmd.AddCommand(odFilesCmd) } func handleOneDriveFileFactory(cmd *cobra.Command, args []string) error { - Err(cmd.Context(), ErrNotYetImplemented) + var ( + ctx = cmd.Context() + service = path.OneDriveService + category = path.FilesCategory + errs = fault.New(false) + ) if utils.HasNoFlagsAndShownHelp(cmd) { return nil } + gc, acct, inp, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User) + if err != nil { + return Only(ctx, err) + } + + sel := selectors.NewOneDriveBackup([]string{User}).Selector + sel.SetDiscreteOwnerIDName(inp.ID(), inp.Name()) + + deets, err := generateAndRestoreDriveItems( + gc, + inp.ID(), + SecondaryUser, + strings.ToLower(SecondaryUser), + acct, + service, + category, + sel, + Tenant, + Destination, + Count, + errs) + if err != nil { + return Only(ctx, err) + } + + for _, e := range errs.Recovered() { + logger.CtxErr(ctx, err).Error(e.Error()) + } + + deets.PrintEntries(ctx) + return nil } diff --git a/src/cmd/factory/impl/sharepoint.go b/src/cmd/factory/impl/sharepoint.go new file mode 100644 index 000000000..7f50ee97b --- /dev/null +++ b/src/cmd/factory/impl/sharepoint.go @@ -0,0 +1,71 @@ +package impl + +import ( + "strings" + + "github.com/spf13/cobra" + + . "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/internal/connector" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" +) + +var spFilesCmd = &cobra.Command{ + Use: "files", + Short: "Generate SharePoint files", + RunE: handleSharePointLibraryFileFactory, +} + +func AddSharePointCommands(cmd *cobra.Command) { + cmd.AddCommand(spFilesCmd) +} + +func handleSharePointLibraryFileFactory(cmd *cobra.Command, args []string) error { + var ( + ctx = cmd.Context() + service = path.SharePointService + category = path.LibrariesCategory + errs = fault.New(false) + ) + + if utils.HasNoFlagsAndShownHelp(cmd) { + return nil + } + + gc, acct, inp, err := getGCAndVerifyResourceOwner(ctx, connector.Sites, Site) + if err != nil { + return Only(ctx, err) + } + + sel := selectors.NewSharePointBackup([]string{Site}).Selector + sel.SetDiscreteOwnerIDName(inp.ID(), inp.Name()) + + deets, err := generateAndRestoreDriveItems( + gc, + inp.ID(), + SecondaryUser, + strings.ToLower(SecondaryUser), + acct, + service, + category, + sel, + Tenant, + Destination, + Count, + errs) + if err != nil { + return Only(ctx, err) + } + + for _, e := range errs.Recovered() { + logger.CtxErr(ctx, err).Error(e.Error()) + } + + deets.PrintEntries(ctx) + + return nil +} diff --git a/src/cmd/getM365/exchange/get_item.go b/src/cmd/getM365/exchange/get_item.go index 8196beb34..1d644f97e 100644 --- a/src/cmd/getM365/exchange/get_item.go +++ b/src/cmd/getM365/exchange/get_item.go @@ -16,12 +16,12 @@ import ( "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" - 
"github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/credentials" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // Required inputs from user for command execution diff --git a/src/cmd/getM365/main.go b/src/cmd/getM365/main.go index 17aa71d78..c7acd3175 100644 --- a/src/cmd/getM365/main.go +++ b/src/cmd/getM365/main.go @@ -17,7 +17,11 @@ var rootCmd = &cobra.Command{ } func main() { - ctx, _ := logger.SeedLevel(context.Background(), logger.Development) + ls := logger.Settings{ + Level: logger.LLDebug, + Format: logger.LFText, + } + ctx, _ := logger.CtxOrSeed(context.Background(), ls) ctx = SetRootCmd(ctx, rootCmd) defer logger.Flush(ctx) diff --git a/src/cmd/getM365/onedrive/get_item.go b/src/cmd/getM365/onedrive/get_item.go index 8794fbb03..ab1378ab9 100644 --- a/src/cmd/getM365/onedrive/get_item.go +++ b/src/cmd/getM365/onedrive/get_item.go @@ -22,9 +22,9 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) const downloadURLKey = "@microsoft.graph.downloadUrl" @@ -77,7 +77,10 @@ func handleOneDriveCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "creating graph adapter")) } - err = runDisplayM365JSON(ctx, graph.NewService(adpt), creds, user, m365ID) + svc := graph.NewService(adpt) + gr := graph.NewNoTimeoutHTTPWrapper() + + err = runDisplayM365JSON(ctx, svc, gr, creds, user, m365ID) if err != nil { cmd.SilenceUsage = true cmd.SilenceErrors = true @@ -105,10 +108,11 @@ func (i itemPrintable) MinimumPrintable() any { func runDisplayM365JSON( ctx context.Context, srv graph.Servicer, + gr graph.Requester, creds account.M365Config, user, itemID string, ) error { - drive, err := api.GetDriveByID(ctx, srv, user) + drive, err := api.GetUsersDrive(ctx, srv, user) if err != nil { return err } @@ -123,7 +127,7 @@ func runDisplayM365JSON( } if item != nil { - content, err := getDriveItemContent(item) + content, err := getDriveItemContent(ctx, gr, item) if err != nil { return err } @@ -180,23 +184,21 @@ func serializeObject(data serialization.Parsable) (string, error) { return string(content), err } -func getDriveItemContent(item models.DriveItemable) ([]byte, error) { +func getDriveItemContent( + ctx context.Context, + gr graph.Requester, + item models.DriveItemable, +) ([]byte, error) { url, ok := item.GetAdditionalData()[downloadURLKey].(*string) if !ok { - return nil, clues.New("get download url") + return nil, clues.New("retrieving download url") } - req, err := http.NewRequest(http.MethodGet, *url, nil) + resp, err := gr.Request(ctx, http.MethodGet, *url, nil, nil) if err != nil { - return nil, clues.New("create download request").With("error", err) - } - - hc := graph.HTTPClient(graph.NoTimeout()) - - resp, err := hc.Do(req) - if err != nil { - return nil, clues.New("download item").With("error", err) + return nil, clues.New("downloading item").With("error", err) } + defer resp.Body.Close() content, err := io.ReadAll(resp.Body) if err != nil { diff --git a/src/cmd/purge/purge.go b/src/cmd/purge/purge.go 
index b7bbff321..d9f1133c1 100644 --- a/src/cmd/purge/purge.go +++ b/src/cmd/purge/purge.go @@ -12,6 +12,7 @@ import ( . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/connector" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" @@ -48,7 +49,11 @@ var ErrPurging = clues.New("not all items were successfully purged") // ------------------------------------------------------------------------------------------ func main() { - ctx, _ := logger.SeedLevel(context.Background(), logger.Development) + ls := logger.Settings{ + Level: logger.LLDebug, + Format: logger.LFText, + } + ctx, _ := logger.CtxOrSeed(context.Background(), ls) ctx = SetRootCmd(ctx, purgeCmd) defer logger.Flush(ctx) @@ -226,8 +231,8 @@ func purgeFolders( // compare the folder time to the deletion boundary time first displayName := *fld.GetDisplayName() - dnTime, err := common.ExtractTime(displayName) - if err != nil && !errors.Is(err, common.ErrNoTimeString) { + dnTime, err := dttm.ExtractTime(displayName) + if err != nil && !errors.Is(err, dttm.ErrNoTimeString) { err = clues.Wrap(err, "!! Error: parsing container: "+displayName) Info(ctx, err) @@ -266,11 +271,7 @@ func getGC(ctx context.Context) (account.Account, *connector.GraphConnector, err return account.Account{}, nil, Only(ctx, clues.Wrap(err, "finding m365 account details")) } - // build a graph connector - // TODO: log/print recoverable errors - errs := fault.New(false) - - gc, err := connector.NewGraphConnector(ctx, acct, connector.Users, errs) + gc, err := connector.NewGraphConnector(ctx, acct, connector.Users) if err != nil { return account.Account{}, nil, Only(ctx, clues.Wrap(err, "connecting to graph api")) } @@ -286,7 +287,7 @@ func getBoundaryTime(ctx context.Context) (time.Time, error) { ) if len(before) > 0 { - boundaryTime, err = common.ParseTime(before) + boundaryTime, err = dttm.ParseTime(before) if err != nil { return time.Time{}, Only(ctx, clues.Wrap(err, "parsing before flag to time")) } diff --git a/src/cmd/purge/scripts/onedrivePurge.ps1 b/src/cmd/purge/scripts/onedrivePurge.ps1 index 4b72ebe8f..ae1acf328 100644 --- a/src/cmd/purge/scripts/onedrivePurge.ps1 +++ b/src/cmd/purge/scripts/onedrivePurge.ps1 @@ -131,6 +131,12 @@ if (![string]::IsNullOrEmpty($User)) { # Works for dev domains where format is @.onmicrosoft.com $domain = $User.Split('@')[1].Split('.')[0] $userNameEscaped = $User.Replace('.', '_').Replace('@', '_') + + # hacky special case because of recreated CI user + if ($userNameEscaped -ilike "lynner*") { + $userNameEscaped += '1' + } + $siteUrl = "https://$domain-my.sharepoint.com/personal/$userNameEscaped/" if ($LibraryNameList.count -eq 0) { diff --git a/src/cmd/sanity_test/sanity_tests.go b/src/cmd/sanity_test/sanity_tests.go index a9a155f93..da603d651 100644 --- a/src/cmd/sanity_test/sanity_tests.go +++ b/src/cmd/sanity_test/sanity_tests.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" "os" - "path" + stdpath "path" "strings" "time" @@ -15,12 +15,13 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/users" "golang.org/x/exp/slices" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/tester" 
"github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" ) // --------------------------------------------------------------------------- @@ -63,6 +64,7 @@ func main() { var ( client = msgraphsdk.NewGraphServiceClient(adapter) testUser = tester.GetM365UserID(ctx) + testSite = tester.GetM365SiteID(ctx) testService = os.Getenv("SANITY_RESTORE_SERVICE") folder = strings.TrimSpace(os.Getenv("SANITY_RESTORE_FOLDER")) startTime, _ = mustGetTimeFromName(ctx, folder) @@ -83,7 +85,9 @@ func main() { case "exchange": checkEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime) case "onedrive": - checkOnedriveRestoration(ctx, client, testUser, folder, startTime) + checkOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime) + case "sharepoint": + checkSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime) default: fatal(ctx, "no service specified", nil) } @@ -105,7 +109,7 @@ func checkEmailRestoration( restoreFolder models.MailFolderable itemCount = make(map[string]int32) restoreItemCount = make(map[string]int32) - builder = client.UsersById(testUser).MailFolders() + builder = client.Users().ByUserId(testUser).MailFolders() ) for { @@ -148,8 +152,10 @@ func checkEmailRestoration( "restore_folder_name", folderName) childFolder, err := client. - UsersById(testUser). - MailFoldersById(folderID). + Users(). + ByUserId(testUser). + MailFolders(). + ByMailFolderId(folderID). ChildFolders(). Get(ctx, nil) if err != nil { @@ -209,8 +215,10 @@ func getAllMailSubFolders( ctx = clues.Add(ctx, "parent_folder_id", folderID) childFolder, err := client. - UsersById(testUser). - MailFoldersById(folderID). + Users(). + ByUserId(testUser). + MailFolders(). + ByMailFolderId(folderID). ChildFolders(). Get(ctx, options) if err != nil { @@ -222,7 +230,7 @@ func getAllMailSubFolders( childDisplayName = ptr.Val(child.GetDisplayName()) childFolderCount = ptr.Val(child.GetChildFolderCount()) //nolint:forbidigo - fullFolderName = path.Join(parentFolder, childDisplayName) + fullFolderName = stdpath.Join(parentFolder, childDisplayName) ) if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { @@ -259,8 +267,10 @@ func checkAllSubFolder( ) childFolder, err := client. - UsersById(testUser). - MailFoldersById(folderID). + Users(). + ByUserId(testUser). + MailFolders(). + ByMailFolderId(folderID). ChildFolders(). Get(ctx, options) if err != nil { @@ -271,7 +281,7 @@ func checkAllSubFolder( var ( childDisplayName = ptr.Val(child.GetDisplayName()) //nolint:forbidigo - fullFolderName = path.Join(parentFolder, childDisplayName) + fullFolderName = stdpath.Join(parentFolder, childDisplayName) ) if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { @@ -292,41 +302,97 @@ func checkAllSubFolder( // oneDrive // --------------------------------------------------------------------------- -func checkOnedriveRestoration( +func checkOneDriveRestoration( ctx context.Context, client *msgraphsdk.GraphServiceClient, - testUser, - folderName string, + userID, folderName, dataFolder string, startTime time.Time, ) { - var ( - // map itemID -> item size - fileSizes = make(map[string]int64) - // map itemID -> permission id -> []permission roles - folderPermission = make(map[string][]permissionInfo) - restoreFile = make(map[string]int64) - restoreFolderPermission = make(map[string][]permissionInfo) - ) - drive, err := client. - UsersById(testUser). + Users(). 
+ ByUserId(userID). Drive(). Get(ctx, nil) if err != nil { fatal(ctx, "getting the drive:", err) } + checkDriveRestoration( + ctx, + client, + path.OneDriveService, + folderName, + ptr.Val(drive.GetId()), + ptr.Val(drive.GetName()), + dataFolder, + startTime, + false) +} + +// --------------------------------------------------------------------------- +// sharePoint +// --------------------------------------------------------------------------- + +func checkSharePointRestoration( + ctx context.Context, + client *msgraphsdk.GraphServiceClient, + siteID, userID, folderName, dataFolder string, + startTime time.Time, +) { + drive, err := client. + Sites(). + BySiteId(siteID). + Drive(). + Get(ctx, nil) + if err != nil { + fatal(ctx, "getting the drive:", err) + } + + checkDriveRestoration( + ctx, + client, + path.SharePointService, + folderName, + ptr.Val(drive.GetId()), + ptr.Val(drive.GetName()), + dataFolder, + startTime, + true) +} + +// --------------------------------------------------------------------------- +// shared drive tests +// --------------------------------------------------------------------------- + +func checkDriveRestoration( + ctx context.Context, + client *msgraphsdk.GraphServiceClient, + service path.ServiceType, + folderName, + driveID, + driveName, + dataFolder string, + startTime time.Time, + skipPermissionTest bool, +) { var ( - driveID = ptr.Val(drive.GetId()) - driveName = ptr.Val(drive.GetName()) - restoreFolderID string + // map itemID -> item size + fileSizes = make(map[string]int64) + // map itemID -> permission id -> []permission roles + folderPermissions = make(map[string][]permissionInfo) + restoreFile = make(map[string]int64) + restoredFolderPermissions = make(map[string][]permissionInfo) ) + var restoreFolderID string + ctx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName) response, err := client. - DrivesById(driveID). - Root(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId("root"). Children(). Get(ctx, nil) if err != nil { @@ -337,7 +403,6 @@ func checkOnedriveRestoration( var ( itemID = ptr.Val(driveItem.GetId()) itemName = ptr.Val(driveItem.GetName()) - ictx = clues.Add(ctx, "item_id", itemID, "item_name", itemName) ) if itemName == folderName { @@ -345,8 +410,8 @@ func checkOnedriveRestoration( continue } - folderTime, hasTime := mustGetTimeFromName(ictx, itemName) - if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) { + if itemName != dataFolder { + logAndPrint(ctx, "test data for folder: %s", dataFolder) continue } @@ -362,59 +427,26 @@ func checkOnedriveRestoration( // currently we don't restore blank folders. 
// skip permission check for empty folders if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 { - logger.Ctx(ctx).Info("skipped empty folder: ", itemName) - fmt.Println("skipped empty folder: ", itemName) - + logAndPrint(ctx, "skipped empty folder: %s", itemName) continue } - folderPermission[itemName] = permissionIn(ctx, client, driveID, itemID) - getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermission, startTime) + folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID) + getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime) } - getRestoredDrive(ctx, client, *drive.GetId(), restoreFolderID, restoreFile, restoreFolderPermission, startTime) + getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime) - for folderName, permissions := range folderPermission { - logger.Ctx(ctx).Info("checking for folder: ", folderName) - fmt.Printf("checking for folder: %s\n", folderName) - - restoreFolderPerm := restoreFolderPermission[folderName] - - if len(permissions) < 1 { - logger.Ctx(ctx).Info("no permissions found in:", folderName) - fmt.Println("no permissions found in:", folderName) - - continue - } - - assert( - ctx, - func() bool { return len(permissions) == len(restoreFolderPerm) }, - fmt.Sprintf("wrong number of restored permissions: %s", folderName), - permissions, - restoreFolderPerm) - - for i, perm := range permissions { - // permissions should be sorted, so a by-index comparison works - restored := restoreFolderPerm[i] - - assert( - ctx, - func() bool { return strings.EqualFold(perm.entityID, restored.entityID) }, - fmt.Sprintf("non-matching entity id: %s", folderName), - perm.entityID, - restored.entityID) - - assert( - ctx, - func() bool { return slices.Equal(perm.roles, restored.roles) }, - fmt.Sprintf("different roles restored: %s", folderName), - perm.roles, - restored.roles) - } - } + checkRestoredDriveItemPermissions( + ctx, + service, + skipPermissionTest, + folderPermissions, + restoredFolderPermissions) for fileName, expected := range fileSizes { + logAndPrint(ctx, "checking for file: %s", fileName) + got := restoreFile[fileName] assert( @@ -428,6 +460,69 @@ func checkOnedriveRestoration( fmt.Println("Success") } +func checkRestoredDriveItemPermissions( + ctx context.Context, + service path.ServiceType, + skip bool, + folderPermissions map[string][]permissionInfo, + restoredFolderPermissions map[string][]permissionInfo, +) { + if skip { + return + } + + /** + TODO: replace this check with testElementsMatch + from internal/connector/graph_connector_helper_test.go + **/ + + for folderName, permissions := range folderPermissions { + logAndPrint(ctx, "checking for folder: %s", folderName) + + restoreFolderPerm := restoredFolderPermissions[folderName] + + if len(permissions) < 1 { + logAndPrint(ctx, "no permissions found in: %s", folderName) + continue + } + + permCheck := func() bool { return len(permissions) == len(restoreFolderPerm) } + + if service == path.SharePointService { + permCheck = func() bool { return len(permissions) <= len(restoreFolderPerm) } + } + + assert( + ctx, + permCheck, + fmt.Sprintf("wrong number of restored permissions: %s", folderName), + permissions, + restoreFolderPerm) + + for _, perm := range permissions { + eqID := func(pi permissionInfo) bool { return strings.EqualFold(pi.entityID, perm.entityID) } + i := slices.IndexFunc(restoreFolderPerm, eqID) + + assert( + ctx, + func() bool { return i >= 0 }, +
fmt.Sprintf("permission was restored in: %s", folderName), + perm.entityID, + restoreFolderPerm) + + // permissions should be sorted, so a by-index comparison works + restored := restoreFolderPerm[i] + + assert( + ctx, + func() bool { return slices.Equal(perm.roles, restored.roles) }, + fmt.Sprintf("different roles restored: %s", folderName), + perm.roles, + restored.roles) + } + } +} + func getOneDriveChildFolder( ctx context.Context, client *msgraphsdk.GraphServiceClient, @@ -436,7 +531,7 @@ func getOneDriveChildFolder( folderPermission map[string][]permissionInfo, startTime time.Time, ) { - response, err := client.DrivesById(driveID).ItemsById(itemID).Children().Get(ctx, nil) + response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil) if err != nil { fatal(ctx, "getting child folder", err) } @@ -465,8 +560,7 @@ func getOneDriveChildFolder( // currently we don't restore blank folders. // skip permission check for empty folders if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 { - logger.Ctx(ctx).Info("skipped empty folder: ", fullName) - fmt.Println("skipped empty folder: ", fullName) + logAndPrint(ctx, "skipped empty folder: %s", fullName) continue } @@ -485,8 +579,10 @@ func getRestoredDrive( startTime time.Time, ) { restored, err := client. - DrivesById(driveID). - ItemsById(restoreFolderID). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(restoreFolderID). Children(). Get(ctx, nil) if err != nil { @@ -526,8 +622,10 @@ func permissionIn( pi := []permissionInfo{} pcr, err := client. - DrivesById(driveID). - ItemsById(itemID). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(itemID). Permissions(). Get(ctx, nil) if err != nil { @@ -545,6 +643,7 @@ func permissionIn( entityID string ) + // TODO: replace with filterUserPermissions in onedrive item.go if gv2.GetUser() != nil { entityID = ptr.Val(gv2.GetUser().GetId()) } else if gv2.GetGroup() != nil { @@ -577,12 +676,12 @@ func fatal(ctx context.Context, msg string, err error) { } func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) { - t, err := common.ExtractTime(name) - if err != nil && !errors.Is(err, common.ErrNoTimeString) { + t, err := dttm.ExtractTime(name) + if err != nil && !errors.Is(err, dttm.ErrNoTimeString) { fatal(ctx, "extracting time from name: "+name, err) } - return t, !errors.Is(err, common.ErrNoTimeString) + return t, !errors.Is(err, dttm.ErrNoTimeString) } func isWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool { @@ -633,3 +732,8 @@ func assert( os.Exit(1) } + +func logAndPrint(ctx context.Context, tmpl string, vs ...any) { + logger.Ctx(ctx).Infof(tmpl, vs...) + fmt.Printf(tmpl+"\n", vs...) 
+} diff --git a/src/go.mod b/src/go.mod index f33192cf9..94777caa6 100644 --- a/src/go.mod +++ b/src/go.mod @@ -2,25 +2,25 @@ module github.com/alcionai/corso/src go 1.19 -replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230417220734-efdcd8c54f7f +replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.245 + github.com/aws/aws-sdk-go v1.44.264 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.0 github.com/h2non/gock v1.2.0 github.com/kopia/kopia v0.12.2-0.20230327171220-747baeebdab1 - github.com/microsoft/kiota-abstractions-go v0.18.0 - github.com/microsoft/kiota-authentication-azure-go v0.6.0 - github.com/microsoft/kiota-http-go v0.16.1 - github.com/microsoft/kiota-serialization-form-go v0.8.2 - github.com/microsoft/kiota-serialization-json-go v0.8.2 - github.com/microsoftgraph/msgraph-sdk-go v0.53.0 - github.com/microsoftgraph/msgraph-sdk-go-core v0.33.0 + github.com/microsoft/kiota-abstractions-go v1.0.0 + github.com/microsoft/kiota-authentication-azure-go v1.0.0 + github.com/microsoft/kiota-http-go v1.0.0 + github.com/microsoft/kiota-serialization-form-go v1.0.0 + github.com/microsoft/kiota-serialization-json-go v1.0.0 + github.com/microsoftgraph/msgraph-sdk-go v1.1.0 + github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 github.com/pkg/errors v0.9.1 github.com/rudderlabs/analytics-go v3.3.3+incompatible github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 @@ -33,9 +33,8 @@ require ( github.com/vbauerster/mpb/v8 v8.1.6 go.uber.org/zap v1.24.0 golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb - golang.org/x/time v0.1.0 - golang.org/x/tools v0.8.0 - gopkg.in/resty.v1 v1.12.0 + golang.org/x/time v0.3.0 + golang.org/x/tools v0.9.1 ) require ( @@ -44,6 +43,7 @@ require ( github.com/andybalholm/brotli v1.0.4 // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gofrs/flock v0.8.1 // indirect github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -59,8 +59,8 @@ require ( ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect @@ -70,7 +70,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -78,7 +78,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.4 // indirect + github.com/klauspost/compress v1.16.5 // 
indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/klauspost/reedsolomon v1.11.7 // indirect @@ -88,7 +88,7 @@ require ( github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/microsoft/kiota-serialization-text-go v0.7.0 + github.com/microsoft/kiota-serialization-text-go v1.0.0 github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/minio-go/v7 v7.0.52 // indirect github.com/minio/sha256-simd v1.0.0 // indirect @@ -111,17 +111,17 @@ require ( github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/zeebo/blake3 v0.2.3 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect + go.opentelemetry.io/otel v1.15.1 // indirect + go.opentelemetry.io/otel/trace v1.15.1 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.8.0 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.7.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sync v0.2.0 // indirect + golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.54.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/src/go.sum b/src/go.sum index e77a40b65..407ab2941 100644 --- a/src/go.sum +++ b/src/go.sum @@ -36,12 +36,12 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 h1:VuHAcMq8pU1IWNT/m5yRaGqbK0BiQKHT8X4DTp9CHdI= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -55,8 +55,8 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c h1:Njdw/Nnq2DN3f8QMaHuZZHdVHTUSxFqPMMxDIInDWB4= github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c/go.mod h1:DeaMbAwDvYM6ZfPMR/GUl3hceqI5C8jIQ1lstjB2IW8= -github.com/alcionai/kopia v0.12.2-0.20230417220734-efdcd8c54f7f h1:cD7mcWVTEu83qX6Ml3aqgo8DDv+fBZt/7mQQps2TokM= -github.com/alcionai/kopia v0.12.2-0.20230417220734-efdcd8c54f7f/go.mod h1:eTgZSDaU2pDzVGC7QRubbKOeohvHzzbRXvhZMH+AGHA= +github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79 h1:Wrl99Y7jftZMnNDiOIcRJrjstZO3IEj3+Q/sip27vmI= +github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79/go.mod h1:Iic7CcKhsq+A7MLR9hh6VJfgpcJhLx3Kn+BgjY+azvI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= -github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.264 h1:5klL62ebn6uv3oJ0ixF7K12hKItj8lV3QqWeQPlkFSs= +github.com/aws/aws-sdk-go v1.44.264/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -124,13 +124,14 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= 
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= @@ -202,7 +203,7 @@ github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= -github.com/hanwen/go-fuse/v2 v2.2.0 h1:jo5QZYmBLNcl9ovypWaQ5yXMSSV+Ch68xoC3rtZvvBM= +github.com/hanwen/go-fuse/v2 v2.3.0 h1:t5ivNIH2PK+zw4OBul/iJjsoG9K6kXo4nMDoBpciC8A= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= @@ -233,8 +234,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= -github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= @@ -272,22 +273,22 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microsoft/kiota-abstractions-go v0.18.0 h1:H1kQE5hAq/7Q8gENPJ1Y7DuvG9QqKCpglN8D7TJi9qY= -github.com/microsoft/kiota-abstractions-go v0.18.0/go.mod h1:0lbPErVO6Rj3HHpntNYW/OFmHhJJ1ewPdsi1xPxYIMc= -github.com/microsoft/kiota-authentication-azure-go v0.6.0 h1:Il9bLO34J6D8DY89xYAXoGh9muvlphayqG4eihyT6B8= -github.com/microsoft/kiota-authentication-azure-go v0.6.0/go.mod h1:EJCHiLWLXW1/mSgX7lYReAhVO37MzRT5Xi2mcPTwCRQ= -github.com/microsoft/kiota-http-go v0.16.1 h1:5SZbSwHs14Xve5VMQHHz00lwL/kEg3H9rgESAUrXnvw= -github.com/microsoft/kiota-http-go v0.16.1/go.mod h1:pKSaeSaBwh3Zadbnzw3kALEZbCZA1gq7A5PuxwVd/aU= -github.com/microsoft/kiota-serialization-form-go v0.8.2 h1:qrkJGBObZo0NjJwwbT3lUySjaLKqjz+r4TQGQTX/C/c= -github.com/microsoft/kiota-serialization-form-go v0.8.2/go.mod h1:FQqYzIrGX6KUoDOlg+DhDWoGaZoB8AicBYGOsBq0Dw4= -github.com/microsoft/kiota-serialization-json-go v0.8.2 h1:vLKZAOiMsaUxq36RDo3S/FfQbW2VQCdAIu4DS7+Qhrk= 
-github.com/microsoft/kiota-serialization-json-go v0.8.2/go.mod h1:gGcLNSdIdOZ4la2qztA0vaCq/LtlA53gpP+ur8n/+oA= -github.com/microsoft/kiota-serialization-text-go v0.7.0 h1:uayeq8fpDcZgL0zDyLkYZsH6zNnEXKgp+bRWfR5LcxA= -github.com/microsoft/kiota-serialization-text-go v0.7.0/go.mod h1:2su1PTllHCMNkHugmvpYad+AKBXUUGoiNP3xOAJUL7w= -github.com/microsoftgraph/msgraph-sdk-go v0.53.0 h1:HpQd1Nvr8yQNeqhDuiVSbqn1fkHsFbRFDmnuhhXJXOQ= -github.com/microsoftgraph/msgraph-sdk-go v0.53.0/go.mod h1:BZLyon4n4T4EuLIAlX+kJ5JgneFTXVQDah1AJuq3FRY= -github.com/microsoftgraph/msgraph-sdk-go-core v0.33.0 h1:cDL3ov/IZ2ZarUJdGGPsdR+46ALdd3CRAiDBIylLCoA= -github.com/microsoftgraph/msgraph-sdk-go-core v0.33.0/go.mod h1:d0mU3PQAWnN/C4CwPJEZz2QhesrnR5UDnqRu2ODWPkI= +github.com/microsoft/kiota-abstractions-go v1.0.0 h1:teQS3yOmcTyps+O48AD17LI8TR1B3wCEwGFcwC6K75c= +github.com/microsoft/kiota-abstractions-go v1.0.0/go.mod h1:2yaRQnx2KU7UaenYSApiTT4pf7fFkPV0B71Rm2uYynQ= +github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk= +github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw= +github.com/microsoft/kiota-http-go v1.0.0 h1:F1hd6gMlLeEgH2CkRB7z13ow7LxMKMWEmms/t0VfS+k= +github.com/microsoft/kiota-http-go v1.0.0/go.mod h1:eujxJliqodotsYepIc6ihhK+vXMMt5Q8YiSNL7+7M7U= +github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI= +github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA= +github.com/microsoft/kiota-serialization-json-go v1.0.0 h1:snT+SwS/R4CMjkmj7mjCHrmib2nKWqGvUWaedgliMbI= +github.com/microsoft/kiota-serialization-json-go v1.0.0/go.mod h1:psfgIfqWm/9P1JAdl2cxHHIg9SdEtYHOetfDLIQ5/dw= +github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA= +github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M= +github.com/microsoftgraph/msgraph-sdk-go v1.1.0 h1:NtFsFVIt8lpXcTlRbLG1WuCOTzltzS5j+U8Fecqdnr4= +github.com/microsoftgraph/msgraph-sdk-go v1.1.0/go.mod h1:NIk9kSn7lQ5Hnhhn3FM4NrJWz54JfDHD0JvhJZky27g= +github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY= +github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.52 h1:8XhG36F6oKQUDDSuz6dY3rioMzovKjW40W6ANuN0Dps= @@ -430,10 +431,10 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/otel v1.15.1 h1:3Iwq3lfRByPaws0f6bU3naAqOR1n5IeDWd9390kWHa8= +go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc= 
+go.opentelemetry.io/otel/trace v1.15.1 h1:uXLo6iHJEzDfrNC0L0mNjItIp06SyaBQxu5t3xMlngY= +go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= @@ -494,7 +495,6 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -529,8 +529,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -551,8 +551,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -603,8 +603,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -622,8 +622,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -672,8 +672,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -740,8 +740,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd h1:sLpv7bNL1AsX3fdnWh9WVh7ejIzXdOc1RRHGeAmeStU= -google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod 
h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -782,8 +782,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/src/internal/common/crash/crash.go b/src/internal/common/crash/crash.go index a35b93f87..bc1c53159 100644 --- a/src/internal/common/crash/crash.go +++ b/src/internal/common/crash/crash.go @@ -5,6 +5,7 @@ import ( "fmt" "runtime" "runtime/debug" + "strings" "github.com/alcionai/clues" @@ -22,31 +23,46 @@ import ( // err = crErr // err needs to be a named return variable // } // }() -func Recovery(ctx context.Context, r any) error { +func Recovery(ctx context.Context, r any, namespace string) error { var ( err error inFile string + j int ) - if r != nil { - if re, ok := r.(error); ok { - err = re - } else if re, ok := r.(string); ok { - err = clues.New(re) - } else { - err = clues.New(fmt.Sprintf("%v", r)) - } - - _, file, _, ok := runtime.Caller(3) - if ok { - inFile = " in file: " + file - } - - err = clues.Wrap(err, "panic recovery"+inFile). - WithClues(ctx). - With("stacktrace", string(debug.Stack())) - logger.CtxErr(ctx, err).Error("backup panic") + if r == nil { + return nil } + if re, ok := r.(error); ok { + err = re + } else if re, ok := r.(string); ok { + err = clues.New(re) + } else { + err = clues.New(fmt.Sprintf("%v", r)) + } + + for i := 1; i < 10; i++ { + _, file, line, ok := runtime.Caller(i) + if j > 0 { + if !strings.Contains(file, "panic.go") { + inFile = fmt.Sprintf(": file %s - line %d", file, line) + break + } + + j = 0 + } + + // skip the location where Recovery() gets called. + if j == 0 && ok && !strings.Contains(file, "panic.go") && !strings.Contains(file, "crash.go") { + j++ + } + } + + err = clues.Wrap(err, "panic recovery"+inFile). + WithClues(ctx). 
+ With("stacktrace", string(debug.Stack())) + logger.CtxErr(ctx, err).Error(namespace + " panic") + return err } diff --git a/src/internal/common/crash/crash_test.go b/src/internal/common/crash/crash_test.go index 09a6559b9..375e6932e 100644 --- a/src/internal/common/crash/crash_test.go +++ b/src/internal/common/crash/crash_test.go @@ -52,7 +52,7 @@ func (suite *CrashTestDummySuite) TestRecovery() { ctx, flush := tester.NewContext() defer func() { - err := crash.Recovery(ctx, recover()) + err := crash.Recovery(ctx, recover(), "test") test.expect(t, err, clues.ToCore(err)) flush() }() diff --git a/src/internal/common/time.go b/src/internal/common/dttm/dttm.go similarity index 55% rename from src/internal/common/time.go rename to src/internal/common/dttm/dttm.go index 23db15b77..ae9e084c5 100644 --- a/src/internal/common/time.go +++ b/src/internal/common/dttm/dttm.go @@ -1,4 +1,4 @@ -package common +package dttm import ( "regexp" @@ -10,8 +10,8 @@ import ( type TimeFormat string const ( - // StandardTime is the canonical format used for all data storage in corso - StandardTime TimeFormat = time.RFC3339Nano + // Standard is the canonical format used for all data storage in corso + Standard TimeFormat = time.RFC3339Nano // DateOnly is accepted by the CLI as a valid input for timestamp-based // filters. Time and timezone are assumed to be 00:00:00 and UTC. @@ -21,23 +21,23 @@ const ( // non-json cli outputs. TabularOutput TimeFormat = "2006-01-02T15:04:05Z" - // LegacyTime is used in /exchange/service_restore to comply with certain + // Legacy is used in /exchange/service_restore to comply with certain // graphAPI time format requirements. - LegacyTime TimeFormat = time.RFC3339 + Legacy TimeFormat = time.RFC3339 - // SimpleDateTime is the default value appended to the root restoration folder name. - SimpleDateTime TimeFormat = "02-Jan-2006_15:04:05" - // SimpleDateTimeOneDrive modifies SimpleDateTimeFormat to comply with onedrive folder + // HumanReadable is the default value appended to the root restoration folder name. + HumanReadable TimeFormat = "02-Jan-2006_15:04:05" + // HumanReadableDriveItem modifies SimpleDateTimeFormat to comply with onedrive folder // restrictions: primarily swapping `-` instead of `:` which is a reserved character. - SimpleDateTimeOneDrive TimeFormat = "02-Jan-2006_15-04-05" + HumanReadableDriveItem TimeFormat = "02-Jan-2006_15-04-05" // m365 will remove the :00 second suffix on folder names, resulting in the following formats. - ClippedSimple TimeFormat = "02-Jan-2006_15:04" - ClippedSimpleOneDrive TimeFormat = "02-Jan-2006_15-04" + ClippedHuman TimeFormat = "02-Jan-2006_15:04" + ClippedHumanDriveItem TimeFormat = "02-Jan-2006_15-04" - // SimpleTimeTesting is used for testing restore destination folders. + // SafeForTesting is used for testing restore destination folders. // Microsecond granularity prevents collisions in parallel package or workflow runs. - SimpleTimeTesting TimeFormat = SimpleDateTimeOneDrive + ".000000" + SafeForTesting TimeFormat = HumanReadableDriveItem + ".000000" // M365dateTimeTimeZoneTimeFormat is the format used by M365 for datetimetimezone resource // https://learn.microsoft.com/en-us/graph/api/resources/datetimetimezone?view=graph-rest-1.0 @@ -48,42 +48,42 @@ const ( // identify the folders produced in external data during automated testing. For safety, each // time format described above should have a matching regexp. 
var ( - clippedSimpleRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}:\d{2}).*`) - clippedSimpleOneDriveRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}).*`) - dateOnlyRE = regexp.MustCompile(`.*(\d{4}-\d{2}-\d{2}).*`) - legacyTimeRE = regexp.MustCompile( + clippedHumanRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}:\d{2}).*`) + clippedHumanOneDriveRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}).*`) + dateOnlyRE = regexp.MustCompile(`.*(\d{4}-\d{2}-\d{2}).*`) + legacyRE = regexp.MustCompile( `.*(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}?([Zz]|[a-zA-Z]{2}|([\+|\-]([01]\d|2[0-3])))).*`) - simpleTimeTestingRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}.\d{6}).*`) - simpleDateTimeRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}:\d{2}:\d{2}).*`) - simpleDateTimeOneDriveRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}).*`) - standardTimeRE = regexp.MustCompile( + SafeForTestingRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}.\d{6}).*`) + HumanReadableRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}:\d{2}:\d{2}).*`) + HumanReadableOneDriveRE = regexp.MustCompile(`.*(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2}).*`) + standardRE = regexp.MustCompile( `.*(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?([Zz]|[a-zA-Z]{2}|([\+|\-]([01]\d|2[0-3])))).*`) - tabularOutputTimeRE = regexp.MustCompile(`.*(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}([Zz]|[a-zA-Z]{2})).*`) + tabularOutputRE = regexp.MustCompile(`.*(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}([Zz]|[a-zA-Z]{2})).*`) ) var ( // shortened formats (clipped*, DateOnly) must follow behind longer formats, otherwise they'll // get eagerly chosen as the parsable format, slicing out some data. formats = []TimeFormat{ - StandardTime, - SimpleTimeTesting, - SimpleDateTime, - SimpleDateTimeOneDrive, - LegacyTime, + Standard, + SafeForTesting, + HumanReadable, + HumanReadableDriveItem, + Legacy, TabularOutput, - ClippedSimple, - ClippedSimpleOneDrive, + ClippedHuman, + ClippedHumanDriveItem, DateOnly, } regexes = []*regexp.Regexp{ - standardTimeRE, - simpleTimeTestingRE, - simpleDateTimeRE, - simpleDateTimeOneDriveRE, - legacyTimeRE, - tabularOutputTimeRE, - clippedSimpleRE, - clippedSimpleOneDriveRE, + standardRE, + SafeForTestingRE, + HumanReadableRE, + HumanReadableOneDriveRE, + legacyRE, + tabularOutputRE, + clippedHumanRE, + clippedHumanOneDriveRE, dateOnlyRE, } ) @@ -95,43 +95,43 @@ var ( // Now produces the current time as a string in the standard format. func Now() string { - return FormatNow(StandardTime) + return FormatNow(Standard) } // FormatNow produces the current time in UTC using the provided // time format. func FormatNow(fmt TimeFormat) string { - return FormatTimeWith(time.Now(), fmt) + return FormatTo(time.Now(), fmt) } -// FormatTimeWith produces the a datetime with the given format. -func FormatTimeWith(t time.Time, fmt TimeFormat) string { +// FormatTo produces the a datetime with the given format. +func FormatTo(t time.Time, fmt TimeFormat) string { return t.UTC().Format(string(fmt)) } -// FormatTime produces the standard format for corso time values. +// Format produces the standard format for corso time values. // Always formats into the UTC timezone. 
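// A minimal usage sketch (an assumed caller, not drawn from this change) of the
// renamed dttm helpers above; the wrapper below and the printed values are
// illustrative only.
package main

import (
	"fmt"
	"time"

	"github.com/alcionai/corso/src/internal/common/dttm"
)

func main() {
	// Standard (RFC3339Nano) is the canonical storage format.
	fmt.Println(dttm.FormatNow(dttm.Standard))

	// HumanReadableDriveItem swaps ':' for '-' so the result is a legal
	// OneDrive folder-name suffix.
	fmt.Println(dttm.FormatTo(time.Now(), dttm.HumanReadableDriveItem))
}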
-func FormatTime(t time.Time) string { - return FormatTimeWith(t, StandardTime) +func Format(t time.Time) string { + return FormatTo(t, Standard) } -// FormatSimpleDateTime produces a simple datetime of the format +// FormatToHumanReadable produces a simple datetime of the format // "02-Jan-2006_15:04:05" -func FormatSimpleDateTime(t time.Time) string { - return FormatTimeWith(t, SimpleDateTime) +func FormatToHumanReadable(t time.Time) string { + return FormatTo(t, HumanReadable) } -// FormatTabularDisplayTime produces the standard format for displaying +// FormatToTabularDisplay produces the standard format for displaying // a timestamp as part of user-readable cli output. // "2016-01-02T15:04:05Z" -func FormatTabularDisplayTime(t time.Time) string { - return FormatTimeWith(t, TabularOutput) +func FormatToTabularDisplay(t time.Time) string { + return FormatTo(t, TabularOutput) } -// FormatLegacyTime produces standard format for string values +// FormatToLegacy produces standard format for string values // that are placed in SingleValueExtendedProperty tags -func FormatLegacyTime(t time.Time) string { - return FormatTimeWith(t, LegacyTime) +func FormatToLegacy(t time.Time) string { + return FormatTo(t, Legacy) } // ParseTime makes a best attempt to produce a time value from diff --git a/src/internal/common/time_test.go b/src/internal/common/dttm/dttm_test.go similarity index 60% rename from src/internal/common/time_test.go rename to src/internal/common/dttm/dttm_test.go index 6c58f7555..3100419dc 100644 --- a/src/internal/common/time_test.go +++ b/src/internal/common/dttm/dttm_test.go @@ -1,4 +1,4 @@ -package common_test +package dttm_test import ( "testing" @@ -9,65 +9,64 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/tester" ) -type CommonTimeUnitSuite struct { +type DTTMUnitSuite struct { tester.Suite } -func TestCommonTimeUnitSuite(t *testing.T) { - s := &CommonTimeUnitSuite{Suite: tester.NewUnitSuite(t)} - suite.Run(t, s) +func TestDTTMUnitSuite(t *testing.T) { + suite.Run(t, &DTTMUnitSuite{Suite: tester.NewUnitSuite(t)}) } -func (suite *CommonTimeUnitSuite) TestFormatTime() { +func (suite *DTTMUnitSuite) TestFormatTime() { t := suite.T() now := time.Now() - result := common.FormatTime(now) + result := dttm.Format(now) assert.Equal(t, now.UTC().Format(time.RFC3339Nano), result) } -func (suite *CommonTimeUnitSuite) TestLegacyTime() { +func (suite *DTTMUnitSuite) TestLegacyTime() { t := suite.T() now := time.Now() - result := common.FormatLegacyTime(now) + result := dttm.FormatToLegacy(now) assert.Equal(t, now.UTC().Format(time.RFC3339), result) } -func (suite *CommonTimeUnitSuite) TestFormatTabularDisplayTime() { +func (suite *DTTMUnitSuite) TestFormatTabularDisplayTime() { t := suite.T() now := time.Now() - result := common.FormatTabularDisplayTime(now) - assert.Equal(t, now.UTC().Format(string(common.TabularOutput)), result) + result := dttm.FormatToTabularDisplay(now) + assert.Equal(t, now.UTC().Format(string(dttm.TabularOutput)), result) } -func (suite *CommonTimeUnitSuite) TestParseTime() { +func (suite *DTTMUnitSuite) TestParseTime() { t := suite.T() now := time.Now() nowStr := now.Format(time.RFC3339Nano) - result, err := common.ParseTime(nowStr) + result, err := dttm.ParseTime(nowStr) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, now.UTC(), result) - _, err = common.ParseTime("") + _, 
err = dttm.ParseTime("") require.Error(t, err, clues.ToCore(err)) - _, err = common.ParseTime("flablabls") + _, err = dttm.ParseTime("flablabls") require.Error(t, err, clues.ToCore(err)) } -func (suite *CommonTimeUnitSuite) TestExtractTime() { - comparable := func(t *testing.T, tt time.Time, shortFormat common.TimeFormat) time.Time { - ts := common.FormatLegacyTime(tt.UTC()) +func (suite *DTTMUnitSuite) TestExtractTime() { + comparable := func(t *testing.T, tt time.Time, shortFormat dttm.TimeFormat) time.Time { + ts := dttm.FormatToLegacy(tt.UTC()) if len(shortFormat) > 0 { ts = tt.UTC().Format(string(shortFormat)) } - c, err := common.ParseTime(ts) + c, err := dttm.ParseTime(ts) require.NoError(t, err, clues.ToCore(err)) @@ -92,16 +91,16 @@ func (suite *CommonTimeUnitSuite) TestExtractTime() { parseT("2006-01-02T03:00:04-01:00"), } - formats := []common.TimeFormat{ - common.ClippedSimple, - common.ClippedSimpleOneDrive, - common.LegacyTime, - common.SimpleDateTime, - common.SimpleDateTimeOneDrive, - common.StandardTime, - common.TabularOutput, - common.SimpleTimeTesting, - common.DateOnly, + formats := []dttm.TimeFormat{ + dttm.ClippedHuman, + dttm.ClippedHumanDriveItem, + dttm.Legacy, + dttm.HumanReadable, + dttm.HumanReadableDriveItem, + dttm.Standard, + dttm.TabularOutput, + dttm.SafeForTesting, + dttm.DateOnly, } type presuf struct { @@ -118,7 +117,7 @@ func (suite *CommonTimeUnitSuite) TestExtractTime() { type testable struct { input string - clippedFormat common.TimeFormat + clippedFormat dttm.TimeFormat expect time.Time } @@ -129,13 +128,13 @@ func (suite *CommonTimeUnitSuite) TestExtractTime() { for _, f := range formats { shortFormat := f - if f != common.ClippedSimple && - f != common.ClippedSimpleOneDrive && - f != common.DateOnly { + if f != dttm.ClippedHuman && + f != dttm.ClippedHumanDriveItem && + f != dttm.DateOnly { shortFormat = "" } - v := common.FormatTimeWith(in, f) + v := dttm.FormatTo(in, f) for _, ps := range pss { table = append(table, testable{ @@ -151,7 +150,7 @@ func (suite *CommonTimeUnitSuite) TestExtractTime() { suite.Run(test.input, func() { t := suite.T() - result, err := common.ExtractTime(test.input) + result, err := dttm.ExtractTime(test.input) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, test.expect, comparable(t, result, test.clippedFormat)) }) diff --git a/src/internal/common/idname.go b/src/internal/common/idname.go deleted file mode 100644 index e50f30760..000000000 --- a/src/internal/common/idname.go +++ /dev/null @@ -1,51 +0,0 @@ -package common - -import ( - "strings" - - "golang.org/x/exp/maps" -) - -type IDNamer interface { - // the canonical id of the thing, generated and usable - // by whichever system has ownership of it. - ID() string - // the human-readable name of the thing. - Name() string -} - -type IDNameSwapper interface { - IDOf(name string) (string, bool) - NameOf(id string) (string, bool) - IDs() []string - Names() []string -} - -var _ IDNameSwapper = &IDsNames{} - -type IDsNames struct { - IDToName map[string]string - NameToID map[string]string -} - -// IDOf returns the id associated with the given name. -func (in IDsNames) IDOf(name string) (string, bool) { - id, ok := in.NameToID[strings.ToLower(name)] - return id, ok -} - -// NameOf returns the name associated with the given id. -func (in IDsNames) NameOf(id string) (string, bool) { - name, ok := in.IDToName[strings.ToLower(id)] - return name, ok -} - -// IDs returns all known ids. 
-func (in IDsNames) IDs() []string {
-	return maps.Keys(in.IDToName)
-}
-
-// Names returns all known names.
-func (in IDsNames) Names() []string {
-	return maps.Keys(in.NameToID)
-}
diff --git a/src/internal/common/idname/idname.go b/src/internal/common/idname/idname.go
new file mode 100644
index 000000000..d56fab025
--- /dev/null
+++ b/src/internal/common/idname/idname.go
@@ -0,0 +1,107 @@
+package idname
+
+import (
+	"strings"
+
+	"golang.org/x/exp/maps"
+)
+
+// Provider is a tuple containing an ID and a Name. Names are
+// assumed to be human-displayable versions of system IDs.
+// Providers should always be populated, while a nil value is
+// likely an error. Compliant structs should provide both a name
+// and an ID, never just one. Values are not validated, so both
+// values being empty is an allowed condition, but the assumption
+// is that downstream consumers will have problems as a result.
+type Provider interface {
+	// ID returns the canonical id of the thing, generated and
+	// usable by whichever system has ownership of it.
+	ID() string
+	// the human-readable name of the thing.
+	Name() string
+}
+
+var _ Provider = &is{}
+
+type is struct {
+	id   string
+	name string
+}
+
+func (is is) ID() string   { return is.id }
+func (is is) Name() string { return is.name }
+
+type Cacher interface {
+	IDOf(name string) (string, bool)
+	NameOf(id string) (string, bool)
+	IDs() []string
+	Names() []string
+	ProviderForID(id string) Provider
+	ProviderForName(id string) Provider
+}
+
+var _ Cacher = &cache{}
+
+type cache struct {
+	idToName map[string]string
+	nameToID map[string]string
+}
+
+func NewCache(idToName map[string]string) cache {
+	nti := make(map[string]string, len(idToName))
+
+	for id, name := range idToName {
+		nti[name] = id
+	}
+
+	return cache{
+		idToName: idToName,
+		nameToID: nti,
+	}
+}
+
+// IDOf returns the id associated with the given name.
+func (c cache) IDOf(name string) (string, bool) {
+	id, ok := c.nameToID[strings.ToLower(name)]
+	return id, ok
+}
+
+// NameOf returns the name associated with the given id.
+func (c cache) NameOf(id string) (string, bool) {
+	name, ok := c.idToName[strings.ToLower(id)]
+	return name, ok
+}
+
+// IDs returns all known ids.
+func (c cache) IDs() []string {
+	return maps.Keys(c.idToName)
+}
+
+// Names returns all known names.
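// A minimal sketch (an assumed caller, not part of this change) of how the new
// idname cache is expected to be used; the ids and names are placeholders, and
// keys are kept lowercase because IDOf/NameOf lower-case their lookups.
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/internal/common/idname"
)

func main() {
	// NewCache accepts an id -> name map and derives the reverse index.
	c := idname.NewCache(map[string]string{"id-123": "display name"})

	if id, ok := c.IDOf("display name"); ok {
		fmt.Println("resolved id:", id)
	}

	if name, ok := c.NameOf("id-123"); ok {
		fmt.Println("resolved name:", name)
	}
}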
+func (c cache) Names() []string { + return maps.Keys(c.nameToID) +} + +func (c cache) ProviderForID(id string) Provider { + n, ok := c.NameOf(id) + if !ok { + return &is{} + } + + return &is{ + id: id, + name: n, + } +} + +func (c cache) ProviderForName(name string) Provider { + i, ok := c.IDOf(name) + if !ok { + return &is{} + } + + return &is{ + id: i, + name: name, + } +} diff --git a/src/internal/common/idname/mock/mock.go b/src/internal/common/idname/mock/mock.go new file mode 100644 index 000000000..37f6adad5 --- /dev/null +++ b/src/internal/common/idname/mock/mock.go @@ -0,0 +1,84 @@ +package mock + +import ( + "strings" + + "golang.org/x/exp/maps" + + "github.com/alcionai/corso/src/internal/common/idname" +) + +var _ idname.Provider = &in{} + +func NewProvider(id, name string) *in { + return &in{ + id: id, + name: name, + } +} + +type in struct { + id string + name string +} + +func (i in) ID() string { return i.id } +func (i in) Name() string { return i.name } + +type Cache struct { + IDToName map[string]string + NameToID map[string]string +} + +func NewCache(itn, nti map[string]string) Cache { + return Cache{ + IDToName: itn, + NameToID: nti, + } +} + +// IDOf returns the id associated with the given name. +func (c Cache) IDOf(name string) (string, bool) { + id, ok := c.NameToID[strings.ToLower(name)] + return id, ok +} + +// NameOf returns the name associated with the given id. +func (c Cache) NameOf(id string) (string, bool) { + name, ok := c.IDToName[strings.ToLower(id)] + return name, ok +} + +// IDs returns all known ids. +func (c Cache) IDs() []string { + return maps.Keys(c.IDToName) +} + +// Names returns all known names. +func (c Cache) Names() []string { + return maps.Keys(c.NameToID) +} + +func (c Cache) ProviderForID(id string) idname.Provider { + n, ok := c.NameOf(id) + if !ok { + return nil + } + + return &in{ + id: id, + name: n, + } +} + +func (c Cache) ProviderForName(name string) idname.Provider { + i, ok := c.IDOf(name) + if !ok { + return nil + } + + return &in{ + id: i, + name: name, + } +} diff --git a/src/internal/common/prefixmatcher/mock/mock.go b/src/internal/common/prefixmatcher/mock/mock.go new file mode 100644 index 000000000..ad4568114 --- /dev/null +++ b/src/internal/common/prefixmatcher/mock/mock.go @@ -0,0 +1,44 @@ +package mock + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/alcionai/corso/src/internal/common/prefixmatcher" +) + +var _ prefixmatcher.StringSetReader = &PrefixMap{} + +type PrefixMap struct { + prefixmatcher.StringSetBuilder +} + +func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap { + r := PrefixMap{StringSetBuilder: prefixmatcher.NewMatcher[map[string]struct{}]()} + + for k, v := range m { + r.Add(k, v) + } + + return &r +} + +func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) { + if pm.Empty() { + require.True(t, r.Empty(), "both prefix maps are empty") + return + } + + pks := pm.Keys() + rks := r.Keys() + + assert.ElementsMatch(t, pks, rks, "prefix keys match") + + for _, pk := range pks { + p, _ := pm.Get(pk) + r, _ := r.Get(pk) + assert.Equal(t, p, r, "values match") + } +} diff --git a/src/internal/common/prefixmatcher/prefix_matcher.go b/src/internal/common/prefixmatcher/prefix_matcher.go index cb244cf26..cc7403612 100644 --- a/src/internal/common/prefixmatcher/prefix_matcher.go +++ b/src/internal/common/prefixmatcher/prefix_matcher.go @@ -2,28 +2,48 @@ package prefixmatcher import ( "strings" + + 
"golang.org/x/exp/maps" ) -type View[T any] interface { +type Reader[T any] interface { Get(key string) (T, bool) LongestPrefix(key string) (string, T, bool) Empty() bool + Keys() []string } -type Matcher[T any] interface { +type Builder[T any] interface { // Add adds or updates the item with key to have value value. Add(key string, value T) - View[T] + Reader[T] } +// --------------------------------------------------------------------------- +// Implementation +// --------------------------------------------------------------------------- + +// prefixMatcher implements Builder type prefixMatcher[T any] struct { data map[string]T } -func (m *prefixMatcher[T]) Add(key string, value T) { - m.data[key] = value +func NewMatcher[T any]() Builder[T] { + return &prefixMatcher[T]{ + data: map[string]T{}, + } } +func NopReader[T any]() *prefixMatcher[T] { + return &prefixMatcher[T]{ + data: make(map[string]T), + } +} + +func (m *prefixMatcher[T]) Add(key string, value T) { m.data[key] = value } +func (m prefixMatcher[T]) Empty() bool { return len(m.data) == 0 } +func (m prefixMatcher[T]) Keys() []string { return maps.Keys(m.data) } + func (m *prefixMatcher[T]) Get(key string) (T, bool) { if m == nil { return *new(T), false @@ -58,11 +78,3 @@ func (m *prefixMatcher[T]) LongestPrefix(key string) (string, T, bool) { return rk, rv, found } - -func (m prefixMatcher[T]) Empty() bool { - return len(m.data) == 0 -} - -func NewMatcher[T any]() Matcher[T] { - return &prefixMatcher[T]{data: map[string]T{}} -} diff --git a/src/internal/common/prefixmatcher/prefix_matcher_test.go b/src/internal/common/prefixmatcher/prefix_matcher_test.go index 998b0184e..815e0fd49 100644 --- a/src/internal/common/prefixmatcher/prefix_matcher_test.go +++ b/src/internal/common/prefixmatcher/prefix_matcher_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" + "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/tester" @@ -41,6 +42,8 @@ func (suite *PrefixMatcherUnitSuite) TestAdd_Get() { assert.True(t, ok, "searching for key", k) assert.Equal(t, v, val, "returned value") } + + assert.ElementsMatch(t, maps.Keys(kvs), pm.Keys()) } func (suite *PrefixMatcherUnitSuite) TestLongestPrefix() { diff --git a/src/internal/common/prefixmatcher/string_set_matcher.go b/src/internal/common/prefixmatcher/string_set_matcher.go new file mode 100644 index 000000000..2de4396a2 --- /dev/null +++ b/src/internal/common/prefixmatcher/string_set_matcher.go @@ -0,0 +1,122 @@ +package prefixmatcher + +import "golang.org/x/exp/maps" + +// StringSetReader is a reader designed specifially to contain a set +// of string values (ie: Reader[map[string]struct{}]). +// This is a quality-of-life typecast for the generic Reader. +type StringSetReader interface { + Reader[map[string]struct{}] +} + +// StringSetReader is a builder designed specifially to contain a set +// of string values (ie: Builder[map[string]struct{}]). +// This is a quality-of-life typecast for the generic Builder. +type StringSetBuilder interface { + Builder[map[string]struct{}] +} + +// --------------------------------------------------------------------------- +// Implementation +// --------------------------------------------------------------------------- + +var ( + _ StringSetReader = &StringSetMatcher{} + _ StringSetBuilder = &StringSetMatchBuilder{} +) + +// Items that should be excluded when sourcing data from the base backup. 
+// Parent Path -> item ID -> {} +type StringSetMatcher struct { + ssb StringSetBuilder +} + +func (m *StringSetMatcher) LongestPrefix(parent string) (string, map[string]struct{}, bool) { + if m == nil { + return "", nil, false + } + + return m.ssb.LongestPrefix(parent) +} + +func (m *StringSetMatcher) Empty() bool { + return m == nil || m.ssb.Empty() +} + +func (m *StringSetMatcher) Get(parent string) (map[string]struct{}, bool) { + if m == nil { + return nil, false + } + + return m.ssb.Get(parent) +} + +func (m *StringSetMatcher) Keys() []string { + if m == nil { + return []string{} + } + + return m.ssb.Keys() +} + +func (m *StringSetMatchBuilder) ToReader() *StringSetMatcher { + if m == nil { + return nil + } + + return m.ssm +} + +// Items that should be excluded when sourcing data from the base backup. +// Parent Path -> item ID -> {} +type StringSetMatchBuilder struct { + ssm *StringSetMatcher +} + +func NewStringSetBuilder() *StringSetMatchBuilder { + return &StringSetMatchBuilder{ + ssm: &StringSetMatcher{ + ssb: NewMatcher[map[string]struct{}](), + }, + } +} + +// copies all items into the key's bucket. +func (m *StringSetMatchBuilder) Add(key string, items map[string]struct{}) { + if m == nil { + return + } + + vs, ok := m.ssm.Get(key) + if !ok { + m.ssm.ssb.Add(key, items) + return + } + + maps.Copy(vs, items) + m.ssm.ssb.Add(key, vs) +} + +func (m *StringSetMatchBuilder) LongestPrefix(parent string) (string, map[string]struct{}, bool) { + return m.ssm.LongestPrefix(parent) +} + +func (m *StringSetMatchBuilder) Empty() bool { + return m == nil || m.ssm.Empty() +} + +func (m *StringSetMatchBuilder) Get(parent string) (map[string]struct{}, bool) { + if m == nil { + return nil, false + } + + return m.ssm.Get(parent) +} + +func (m *StringSetMatchBuilder) Keys() []string { + if m == nil { + return []string{} + } + + return m.ssm.Keys() +} diff --git a/src/internal/common/prefixmatcher/string_set_matcher_test.go b/src/internal/common/prefixmatcher/string_set_matcher_test.go new file mode 100644 index 000000000..d9a18bc98 --- /dev/null +++ b/src/internal/common/prefixmatcher/string_set_matcher_test.go @@ -0,0 +1,166 @@ +package prefixmatcher_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "golang.org/x/exp/maps" + + "github.com/alcionai/corso/src/internal/common/prefixmatcher" + "github.com/alcionai/corso/src/internal/tester" +) + +type StringSetUnitSuite struct { + tester.Suite +} + +func TestSTringSetUnitSuite(t *testing.T) { + suite.Run(t, &StringSetUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *StringSetUnitSuite) TestEmpty() { + pm := prefixmatcher.NewStringSetBuilder() + assert.True(suite.T(), pm.Empty()) +} + +func (suite *StringSetUnitSuite) TestToReader() { + var ( + pr prefixmatcher.StringSetReader + t = suite.T() + pm = prefixmatcher.NewStringSetBuilder() + ) + + pr = pm.ToReader() + _, ok := pr.(prefixmatcher.StringSetBuilder) + assert.False(t, ok, "cannot cast to builder") +} + +func (suite *StringSetUnitSuite) TestAdd_Get() { + t := suite.T() + pm := prefixmatcher.NewStringSetBuilder() + kvs := map[string]map[string]struct{}{ + "hello": {"world": {}}, + "hola": {"mundo": {}}, + "foo": {"bar": {}}, + } + + for k, v := range kvs { + pm.Add(k, v) + } + + for k, v := range kvs { + val, ok := pm.Get(k) + assert.True(t, ok, "searching for key", k) + assert.Equal(t, v, val, "returned value") + } + + assert.ElementsMatch(t, maps.Keys(kvs), pm.Keys()) +} + +func (suite *StringSetUnitSuite) TestAdd_Union() 
{ + t := suite.T() + pm := prefixmatcher.NewStringSetBuilder() + pm.Add("hello", map[string]struct{}{ + "world": {}, + "mundo": {}, + }) + pm.Add("hello", map[string]struct{}{ + "goodbye": {}, + "aideu": {}, + }) + + expect := map[string]struct{}{ + "world": {}, + "mundo": {}, + "goodbye": {}, + "aideu": {}, + } + + result, _ := pm.Get("hello") + assert.Equal(t, expect, result) + assert.ElementsMatch(t, []string{"hello"}, pm.Keys()) +} + +func (suite *StringSetUnitSuite) TestLongestPrefix() { + key := "hello" + value := "world" + + table := []struct { + name string + inputKVs map[string]map[string]struct{} + searchKey string + expectedKey string + expectedValue map[string]struct{} + expectedFound assert.BoolAssertionFunc + }{ + { + name: "Empty Prefix", + inputKVs: map[string]map[string]struct{}{ + "": {value: {}}, + }, + searchKey: key, + expectedKey: "", + expectedValue: map[string]struct{}{value: {}}, + expectedFound: assert.True, + }, + { + name: "Exact Match", + inputKVs: map[string]map[string]struct{}{ + key: {value: {}}, + }, + searchKey: key, + expectedKey: key, + expectedValue: map[string]struct{}{value: {}}, + expectedFound: assert.True, + }, + { + name: "Prefix Match", + inputKVs: map[string]map[string]struct{}{ + key[:len(key)-2]: {value: {}}, + }, + searchKey: key, + expectedKey: key[:len(key)-2], + expectedValue: map[string]struct{}{value: {}}, + expectedFound: assert.True, + }, + { + name: "Longest Prefix Match", + inputKVs: map[string]map[string]struct{}{ + key[:len(key)-2]: {value: {}}, + "": {value + "2": {}}, + key[:len(key)-4]: {value + "3": {}}, + }, + searchKey: key, + expectedKey: key[:len(key)-2], + expectedValue: map[string]struct{}{value: {}}, + expectedFound: assert.True, + }, + { + name: "No Match", + inputKVs: map[string]map[string]struct{}{ + "foo": {value: {}}, + }, + searchKey: key, + expectedKey: "", + expectedValue: nil, + expectedFound: assert.False, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + pm := prefixmatcher.NewStringSetBuilder() + + for k, v := range test.inputKVs { + pm.Add(k, v) + } + + k, v, ok := pm.LongestPrefix(test.searchKey) + assert.Equal(t, test.expectedKey, k, "key") + assert.Equal(t, test.expectedValue, v, "value") + test.expectedFound(t, ok, "found") + }) + } +} diff --git a/src/internal/connector/data_collections.go b/src/internal/connector/data_collections.go index 9a51b8f3f..dbfe882e0 100644 --- a/src/internal/connector/data_collections.go +++ b/src/internal/connector/data_collections.go @@ -6,7 +6,8 @@ import ( "github.com/alcionai/clues" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/connector/discovery" "github.com/alcionai/corso/src/internal/connector/exchange" "github.com/alcionai/corso/src/internal/connector/graph" @@ -19,6 +20,8 @@ import ( "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/filters" + "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -34,28 +37,36 @@ import ( // prior history (ie, incrementals) and run a full backup. 
func (gc *GraphConnector) ProduceBackupCollections( ctx context.Context, - owner common.IDNamer, + owner idname.Provider, sels selectors.Selector, metadata []data.RestoreCollection, + lastBackupVersion int, ctrlOpts control.Options, errs *fault.Bus, -) ([]data.BackupCollection, map[string]map[string]struct{}, error) { +) ([]data.BackupCollection, prefixmatcher.StringSetReader, error) { ctx, end := diagnostics.Span( ctx, "gc:produceBackupCollections", diagnostics.Index("service", sels.Service.String())) defer end() + ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()}) + + // Limit the max number of active requests to graph from this collection. + ctrlOpts.Parallelism.ItemFetch = graph.Parallelism(sels.PathService()). + ItemOverride(ctx, ctrlOpts.Parallelism.ItemFetch) + err := verifyBackupInputs(sels, gc.IDNameLookup.IDs()) if err != nil { return nil, nil, clues.Stack(err).WithClues(ctx) } - serviceEnabled, err := checkServiceEnabled( + serviceEnabled, canMakeDeltaQueries, err := checkServiceEnabled( ctx, gc.Discovery.Users(), path.ServiceType(sels.Service), - sels.DiscreteOwner) + sels.DiscreteOwner, + ) if err != nil { return nil, nil, err } @@ -64,12 +75,23 @@ func (gc *GraphConnector) ProduceBackupCollections( return []data.BackupCollection{}, nil, nil } + var ( + colls []data.BackupCollection + ssmb *prefixmatcher.StringSetMatcher + ) + + if !canMakeDeltaQueries { + logger.Ctx(ctx).Info("delta requests not available") + + ctrlOpts.ToggleFeatures.DisableDelta = true + } + switch sels.Service { case selectors.ServiceExchange: - colls, excludes, err := exchange.DataCollections( + colls, ssmb, err = exchange.DataCollections( ctx, sels, - sels, + owner, metadata, gc.credentials, gc.UpdateStatus, @@ -79,26 +101,13 @@ func (gc *GraphConnector) ProduceBackupCollections( return nil, nil, err } - for _, c := range colls { - // kopia doesn't stream Items() from deleted collections, - // and so they never end up calling the UpdateStatus closer. - // This is a brittle workaround, since changes in consumer - // behavior (such as calling Items()) could inadvertently - // break the process state, putting us into deadlock or - // panics. - if c.State() != data.DeletedState { - gc.incrementAwaitingMessages() - } - } - - return colls, excludes, nil - case selectors.ServiceOneDrive: - colls, excludes, err := onedrive.DataCollections( + colls, ssmb, err = onedrive.DataCollections( ctx, sels, - sels, + owner, metadata, + lastBackupVersion, gc.credentials.AzureTenantID, gc.itemClient, gc.Service, @@ -109,20 +118,13 @@ func (gc *GraphConnector) ProduceBackupCollections( return nil, nil, err } - for _, c := range colls { - // kopia doesn't stream Items() from deleted collections. - if c.State() != data.DeletedState { - gc.incrementAwaitingMessages() - } - } - - return colls, excludes, nil - case selectors.ServiceSharePoint: - colls, excludes, err := sharepoint.DataCollections( + colls, ssmb, err = sharepoint.DataCollections( ctx, gc.itemClient, sels, + owner, + metadata, gc.credentials, gc.Service, gc, @@ -132,13 +134,23 @@ func (gc *GraphConnector) ProduceBackupCollections( return nil, nil, err } - gc.incrementMessagesBy(len(colls)) - - return colls, excludes, nil - default: return nil, nil, clues.Wrap(clues.New(sels.Service.String()), "service not supported").WithClues(ctx) } + + for _, c := range colls { + // kopia doesn't stream Items() from deleted collections, + // and so they never end up calling the UpdateStatus closer. 
+ // This is a brittle workaround, since changes in consumer + // behavior (such as calling Items()) could inadvertently + // break the process state, putting us into deadlock or + // panics. + if c.State() != data.DeletedState { + gc.incrementAwaitingMessages() + } + } + + return colls, ssmb, nil } func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error { @@ -155,16 +167,7 @@ func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error { resourceOwner := strings.ToLower(sels.DiscreteOwner) - var found bool - - for _, id := range ids { - if strings.ToLower(id) == resourceOwner { - found = true - break - } - } - - if !found { + if !filters.Equal(ids).Compare(resourceOwner) { return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_resource_owner", sels.DiscreteOwner) } @@ -176,22 +179,28 @@ func checkServiceEnabled( gi discovery.GetInfoer, service path.ServiceType, resource string, -) (bool, error) { +) (bool, bool, error) { if service == path.SharePointService { // No "enabled" check required for sharepoint - return true, nil + return true, true, nil } info, err := gi.GetInfo(ctx, resource) if err != nil { - return false, err + return false, false, err } if !info.ServiceEnabled(service) { - return false, clues.Wrap(graph.ErrServiceNotEnabled, "checking service access") + return false, false, clues.Wrap(graph.ErrServiceNotEnabled, "checking service access") } - return true, nil + canMakeDeltaQueries := true + if service == path.ExchangeService { + // we currently can only check quota exceeded for exchange + canMakeDeltaQueries = info.CanMakeDeltaQueries() + } + + return true, canMakeDeltaQueries, nil } // ConsumeRestoreCollections restores data from the specified collections @@ -201,7 +210,7 @@ func (gc *GraphConnector) ConsumeRestoreCollections( ctx context.Context, backupVersion int, acct account.Account, - selector selectors.Selector, + sels selectors.Selector, dest control.RestoreDestination, opts control.Options, dcs []data.RestoreCollection, @@ -210,6 +219,8 @@ func (gc *GraphConnector) ConsumeRestoreCollections( ctx, end := diagnostics.Span(ctx, "connector:restore") defer end() + ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()}) + var ( status *support.ConnectorOperationStatus deets = &details.Builder{} @@ -220,15 +231,15 @@ func (gc *GraphConnector) ConsumeRestoreCollections( return nil, clues.Wrap(err, "malformed azure credentials") } - switch selector.Service { + switch sels.Service { case selectors.ServiceExchange: status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets, errs) case selectors.ServiceOneDrive: status, err = onedrive.RestoreCollections(ctx, creds, backupVersion, gc.Service, dest, opts, dcs, deets, errs) case selectors.ServiceSharePoint: - status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets, errs) + status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, opts, dcs, deets, errs) default: - err = clues.Wrap(clues.New(selector.Service.String()), "service not supported") + err = clues.Wrap(clues.New(sels.Service.String()), "service not supported") } gc.incrementAwaitingMessages() diff --git a/src/internal/connector/data_collections_test.go b/src/internal/connector/data_collections_test.go index cfa4e171a..649f8c59b 100644 --- a/src/internal/connector/data_collections_test.go +++ b/src/internal/connector/data_collections_test.go @@ -10,15 +10,17 @@ import ( 
"github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/connector/exchange" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/sharepoint" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" - "github.com/alcionai/corso/src/pkg/selectors/testdata" + selTD "github.com/alcionai/corso/src/pkg/selectors/testdata" ) // --------------------------------------------------------------------------- @@ -93,44 +95,57 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() { } for _, test := range tests { - suite.Run(test.name, func() { - t := suite.T() + for _, canMakeDeltaQueries := range []bool{true, false} { + name := test.name - sel := test.getSelector(t) - - collections, excludes, err := exchange.DataCollections( - ctx, - sel, - sel, - nil, - connector.credentials, - connector.UpdateStatus, - control.Options{}, - fault.New(true)) - require.NoError(t, err, clues.ToCore(err)) - assert.Empty(t, excludes) - - for range collections { - connector.incrementAwaitingMessages() + if canMakeDeltaQueries { + name += "-delta" + } else { + name += "-non-delta" } - // Categories with delta endpoints will produce a collection for metadata - // as well as the actual data pulled, and the "temp" root collection. - assert.GreaterOrEqual(t, len(collections), 1, "expected 1 <= num collections <= 2") - assert.GreaterOrEqual(t, 3, len(collections), "expected 1 <= num collections <= 3") + suite.Run(name, func() { + t := suite.T() - for _, col := range collections { - for object := range col.Items(ctx, fault.New(true)) { - buf := &bytes.Buffer{} - _, err := buf.ReadFrom(object.ToReader()) - assert.NoError(t, err, "received a buf.Read error", clues.ToCore(err)) + sel := test.getSelector(t) + + ctrlOpts := control.Defaults() + ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries + + collections, excludes, err := exchange.DataCollections( + ctx, + sel, + sel, + nil, + connector.credentials, + connector.UpdateStatus, + ctrlOpts, + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + assert.True(t, excludes.Empty()) + + for range collections { + connector.incrementAwaitingMessages() } - } - status := connector.Wait() - assert.NotZero(t, status.Successes) - t.Log(status.String()) - }) + // Categories with delta endpoints will produce a collection for metadata + // as well as the actual data pulled, and the "temp" root collection. 
+ assert.GreaterOrEqual(t, len(collections), 1, "expected 1 <= num collections <= 2") + assert.GreaterOrEqual(t, 3, len(collections), "expected 1 <= num collections <= 3") + + for _, col := range collections { + for object := range col.Items(ctx, fault.New(true)) { + buf := &bytes.Buffer{} + _, err := buf.ReadFrom(object.ToReader()) + assert.NoError(t, err, "received a buf.Read error", clues.ToCore(err)) + } + } + + status := connector.Wait() + assert.NotZero(t, status.Successes) + t.Log(status.String()) + }) + } } } @@ -158,7 +173,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner() name: "Invalid onedrive backup user", getSelector: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup(owners) - sel.Include(sel.Folders(selectors.Any())) + sel.Include(selTD.OneDriveBackupFolderScope(sel)) return sel.Selector }, }, @@ -166,7 +181,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner() name: "Invalid sharepoint backup site", getSelector: func(t *testing.T) selectors.Selector { sel := selectors.NewSharePointBackup(owners) - sel.Include(testdata.SharePointBackupFolderScope(sel)) + sel.Include(selTD.SharePointBackupFolderScope(sel)) return sel.Selector }, }, @@ -183,7 +198,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner() name: "missing onedrive backup user", getSelector: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup(owners) - sel.Include(sel.Folders(selectors.Any())) + sel.Include(selTD.OneDriveBackupFolderScope(sel)) sel.DiscreteOwner = "" return sel.Selector }, @@ -192,7 +207,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner() name: "missing sharepoint backup site", getSelector: func(t *testing.T) selectors.Selector { sel := selectors.NewSharePointBackup(owners) - sel.Include(testdata.SharePointBackupFolderScope(sel)) + sel.Include(selTD.SharePointBackupFolderScope(sel)) sel.DiscreteOwner = "" return sel.Selector }, @@ -208,11 +223,12 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner() test.getSelector(t), test.getSelector(t), nil, - control.Options{}, + version.NoBackup, + control.Defaults(), fault.New(true)) assert.Error(t, err, clues.ToCore(err)) assert.Empty(t, collections) - assert.Empty(t, excludes) + assert.Nil(t, excludes) }) } } @@ -236,7 +252,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() { name: "Libraries", getSelector: func() selectors.Selector { sel := selectors.NewSharePointBackup(selSites) - sel.Include(testdata.SharePointBackupFolderScope(sel)) + sel.Include(selTD.SharePointBackupFolderScope(sel)) return sel.Selector }, }, @@ -258,16 +274,18 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() { collections, excludes, err := sharepoint.DataCollections( ctx, - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), sel, + sel, + nil, connector.credentials, connector.Service, connector, - control.Options{}, + control.Defaults(), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) // Not expecting excludes as this isn't an incremental backup. 
- assert.Empty(t, excludes) + assert.True(t, excludes.Empty()) for range collections { connector.incrementAwaitingMessages() @@ -342,15 +360,16 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() { cols, excludes, err := gc.ProduceBackupCollections( ctx, - sel.Selector, + inMock.NewProvider(id, name), sel.Selector, nil, - control.Options{}, + version.NoBackup, + control.Defaults(), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) require.Len(t, cols, 2) // 1 collection, 1 path prefix directory to ensure the root path exists. // No excludes yet as this isn't an incremental backup. - assert.Empty(t, excludes) + assert.True(t, excludes.Empty()) t.Logf("cols[0] Path: %s\n", cols[0].FullPath().String()) assert.Equal( @@ -386,15 +405,16 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() { cols, excludes, err := gc.ProduceBackupCollections( ctx, - sel.Selector, + inMock.NewProvider(id, name), sel.Selector, nil, - control.Options{}, + version.NoBackup, + control.Defaults(), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) assert.Less(t, 0, len(cols)) // No excludes yet as this isn't an incremental backup. - assert.Empty(t, excludes) + assert.True(t, excludes.Empty()) for _, collection := range cols { t.Logf("Path: %s\n", collection.FullPath().String()) diff --git a/src/internal/connector/discovery/discovery.go b/src/internal/connector/discovery/discovery.go index 82a9b916b..df31402b9 100644 --- a/src/internal/connector/discovery/discovery.go +++ b/src/internal/connector/discovery/discovery.go @@ -69,6 +69,22 @@ func Users( return users, nil } +// UserDetails fetches detailed info like - userPurpose for all users in the tenant. +func GetUserInfo( + ctx context.Context, + acct account.Account, + userID string, + errs *fault.Bus, +) (*api.UserInfo, error) { + client, err := apiClient(ctx, acct) + if err != nil { + return nil, err + } + + return client.Users().GetInfo(ctx, userID) +} + +// User fetches a single user's data. 
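// A hedged sketch (an assumed caller, not part of this change) of how setup
// code might consult discovery.GetUserInfo before running a backup; the
// package name and helper function are illustrative placeholders.
package example

import (
	"context"
	"fmt"

	"github.com/alcionai/corso/src/internal/connector/discovery"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

// printUserServices reports which services are enabled for a single user.
func printUserServices(ctx context.Context, acct account.Account, userID string) error {
	info, err := discovery.GetUserInfo(ctx, acct, userID, fault.New(true))
	if err != nil {
		return err
	}

	_, exchangeOK := info.ServicesEnabled[path.ExchangeService]
	_, oneDriveOK := info.ServicesEnabled[path.OneDriveService]
	fmt.Println("exchange:", exchangeOK, "onedrive:", oneDriveOK, "purpose:", info.Mailbox.Purpose)

	return nil
}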
func User( ctx context.Context, gwi getWithInfoer, @@ -77,7 +93,7 @@ func User( u, err := gwi.GetByID(ctx, userID) if err != nil { if graph.IsErrUserNotFound(err) { - return nil, nil, clues.Stack(graph.ErrResourceOwnerNotFound).With("user_id", userID) + return nil, nil, clues.Stack(graph.ErrResourceOwnerNotFound, err).With("user_id", userID) } return nil, nil, clues.Wrap(err, "getting user") diff --git a/src/internal/connector/discovery/discovery_test.go b/src/internal/connector/discovery/discovery_test.go index dd9971b08..198e9e653 100644 --- a/src/internal/connector/discovery/discovery_test.go +++ b/src/internal/connector/discovery/discovery_test.go @@ -18,19 +18,19 @@ import ( "github.com/alcionai/corso/src/pkg/services/m365/api" ) -type DiscoveryIntegrationSuite struct { +type DiscoveryIntgSuite struct { tester.Suite } -func TestDiscoveryIntegrationSuite(t *testing.T) { - suite.Run(t, &DiscoveryIntegrationSuite{ +func TestDiscoveryIntgSuite(t *testing.T) { + suite.Run(t, &DiscoveryIntgSuite{ Suite: tester.NewIntegrationSuite( t, [][]string{tester.M365AcctCredEnvs}), }) } -func (suite *DiscoveryIntegrationSuite) TestUsers() { +func (suite *DiscoveryIntgSuite) TestUsers() { ctx, flush := tester.NewContext() defer flush() @@ -55,7 +55,7 @@ func (suite *DiscoveryIntegrationSuite) TestUsers() { assert.NotEmpty(t, users) } -func (suite *DiscoveryIntegrationSuite) TestUsers_InvalidCredentials() { +func (suite *DiscoveryIntgSuite) TestUsers_InvalidCredentials() { table := []struct { name string acct func(t *testing.T) account.Account @@ -101,7 +101,7 @@ func (suite *DiscoveryIntegrationSuite) TestUsers_InvalidCredentials() { } } -func (suite *DiscoveryIntegrationSuite) TestSites() { +func (suite *DiscoveryIntgSuite) TestSites() { ctx, flush := tester.NewContext() defer flush() @@ -120,7 +120,7 @@ func (suite *DiscoveryIntegrationSuite) TestSites() { assert.NotEmpty(t, sites) } -func (suite *DiscoveryIntegrationSuite) TestSites_InvalidCredentials() { +func (suite *DiscoveryIntgSuite) TestSites_InvalidCredentials() { ctx, flush := tester.NewContext() defer flush() @@ -171,10 +171,9 @@ func (suite *DiscoveryIntegrationSuite) TestSites_InvalidCredentials() { } } -func (suite *DiscoveryIntegrationSuite) TestUserInfo() { +func (suite *DiscoveryIntgSuite) TestUserInfo() { t := suite.T() acct := tester.NewM365Account(t) - userID := tester.M365UserID(t) creds, err := acct.M365Config() require.NoError(t, err) @@ -185,26 +184,86 @@ func (suite *DiscoveryIntegrationSuite) TestUserInfo() { uapi := cli.Users() table := []struct { - name string - user string - expect *api.UserInfo + name string + user string + expect *api.UserInfo + expectErr require.ErrorAssertionFunc }{ { name: "standard test user", - user: userID, + user: tester.M365UserID(t), expect: &api.UserInfo{ - DiscoveredServices: map[path.ServiceType]struct{}{ + ServicesEnabled: map[path.ServiceType]struct{}{ path.ExchangeService: {}, path.OneDriveService: {}, }, + Mailbox: api.MailboxInfo{ + Purpose: "user", + ErrGetMailBoxSetting: nil, + }, }, + expectErr: require.NoError, }, { name: "user does not exist", user: uuid.NewString(), expect: &api.UserInfo{ - DiscoveredServices: map[path.ServiceType]struct{}{ - path.OneDriveService: {}, // currently statically populated + ServicesEnabled: map[path.ServiceType]struct{}{}, + Mailbox: api.MailboxInfo{}, + }, + expectErr: require.NoError, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + result, err := 
discovery.UserInfo(ctx, uapi, test.user) + test.expectErr(t, err, clues.ToCore(err)) + + if err != nil { + return + } + + assert.Equal(t, test.expect.ServicesEnabled, result.ServicesEnabled) + }) + } +} + +func (suite *DiscoveryIntgSuite) TestUserWithoutDrive() { + t := suite.T() + acct := tester.NewM365Account(t) + userID := tester.M365UserID(t) + + table := []struct { + name string + user string + expect *api.UserInfo + }{ + { + name: "user without drive and exchange", + user: "a53c26f7-5100-4acb-a910-4d20960b2c19", // User: testevents@10rqc2.onmicrosoft.com + expect: &api.UserInfo{ + ServicesEnabled: map[path.ServiceType]struct{}{}, + Mailbox: api.MailboxInfo{ + ErrGetMailBoxSetting: []error{api.ErrMailBoxSettingsNotFound}, + }, + }, + }, + { + name: "user with drive and exchange", + user: userID, + expect: &api.UserInfo{ + ServicesEnabled: map[path.ServiceType]struct{}{ + path.ExchangeService: {}, + path.OneDriveService: {}, + }, + Mailbox: api.MailboxInfo{ + Purpose: "user", + ErrGetMailBoxSetting: []error{}, }, }, }, @@ -216,9 +275,11 @@ func (suite *DiscoveryIntegrationSuite) TestUserInfo() { t := suite.T() - result, err := discovery.UserInfo(ctx, uapi, test.user) + result, err := discovery.GetUserInfo(ctx, acct, test.user, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, test.expect, result) + assert.Equal(t, test.expect.ServicesEnabled, result.ServicesEnabled) + assert.Equal(t, test.expect.Mailbox.ErrGetMailBoxSetting, result.Mailbox.ErrGetMailBoxSetting) + assert.Equal(t, test.expect.Mailbox.Purpose, result.Mailbox.Purpose) }) } } diff --git a/src/internal/connector/exchange/api/api.go b/src/internal/connector/exchange/api/api.go deleted file mode 100644 index 444251d6b..000000000 --- a/src/internal/connector/exchange/api/api.go +++ /dev/null @@ -1,147 +0,0 @@ -package api - -import ( - "context" - "strings" - - "github.com/alcionai/clues" - "github.com/microsoft/kiota-abstractions-go/serialization" - "github.com/microsoftgraph/msgraph-sdk-go/models" - - "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/pkg/account" -) - -// --------------------------------------------------------------------------- -// common types and consts -// --------------------------------------------------------------------------- - -// DeltaUpdate holds the results of a current delta token. It normally -// gets produced when aggregating the addition and removal of items in -// a delta-queryable folder. -type DeltaUpdate struct { - // the deltaLink itself - URL string - // true if the old delta was marked as invalid - Reset bool -} - -// GraphQuery represents functions which perform exchange-specific queries -// into M365 backstore. Responses -> returned items will only contain the information -// that is included in the options -// TODO: use selector or path for granularity into specific folders or specific date ranges -type GraphQuery func(ctx context.Context, userID string) (serialization.Parsable, error) - -// GraphRetrievalFunctions are functions from the Microsoft Graph API that retrieve -// the default associated data of a M365 object. This varies by object. Additional -// Queries must be run to obtain the omitted fields. 
-type GraphRetrievalFunc func( - ctx context.Context, - user, m365ID string, -) (serialization.Parsable, error) - -// --------------------------------------------------------------------------- -// interfaces -// --------------------------------------------------------------------------- - -// Client is used to fulfill the interface for exchange -// queries that are traditionally backed by GraphAPI. A -// struct is used in this case, instead of deferring to -// pure function wrappers, so that the boundary separates the -// granular implementation of the graphAPI and kiota away -// from the exchange package's broader intents. -type Client struct { - Credentials account.M365Config - - // The Stable service is re-usable for any non-paged request. - // This allows us to maintain performance across async requests. - Stable graph.Servicer - - // The LargeItem graph servicer is configured specifically for - // downloading large items. Specifically for use when handling - // attachments, and for no other use. - LargeItem graph.Servicer -} - -// NewClient produces a new exchange api client. Must be used in -// place of creating an ad-hoc client struct. -func NewClient(creds account.M365Config) (Client, error) { - s, err := NewService(creds) - if err != nil { - return Client{}, err - } - - li, err := newLargeItemService(creds) - if err != nil { - return Client{}, err - } - - return Client{creds, s, li}, nil -} - -// service generates a new service. Used for paged and other long-running -// requests instead of the client's stable service, so that in-flight state -// within the adapter doesn't get clobbered -func (c Client) service() (*graph.Service, error) { - s, err := NewService(c.Credentials) - return s, err -} - -func NewService(creds account.M365Config, opts ...graph.Option) (*graph.Service, error) { - a, err := graph.CreateAdapter( - creds.AzureTenantID, - creds.AzureClientID, - creds.AzureClientSecret, - opts...) - if err != nil { - return nil, clues.Wrap(err, "generating graph adapter") - } - - return graph.NewService(a), nil -} - -func newLargeItemService(creds account.M365Config) (*graph.Service, error) { - a, err := NewService(creds, graph.NoTimeout()) - if err != nil { - return nil, clues.Wrap(err, "generating no-timeout graph adapter") - } - - return a, nil -} - -// --------------------------------------------------------------------------- -// helper funcs -// --------------------------------------------------------------------------- - -// checkIDAndName is a helper function to ensure that -// the ID and name pointers are set prior to being called. 
-func checkIDAndName(c graph.Container) error { - id := ptr.Val(c.GetId()) - if len(id) == 0 { - return clues.New("container missing ID") - } - - dn := ptr.Val(c.GetDisplayName()) - if len(dn) == 0 { - return clues.New("container missing display name").With("container_id", id) - } - - return nil -} - -func HasAttachments(body models.ItemBodyable) bool { - if body == nil { - return false - } - - if ct, ok := ptr.ValOK(body.GetContentType()); !ok || ct == models.TEXT_BODYTYPE { - return false - } - - if body, ok := ptr.ValOK(body.GetContent()); !ok || len(body) == 0 { - return false - } - - return strings.Contains(ptr.Val(body.GetContent()), "src=\"cid:") -} diff --git a/src/internal/connector/exchange/attachment.go b/src/internal/connector/exchange/attachment.go index 4c6c99d13..07fb0e7dd 100644 --- a/src/internal/connector/exchange/attachment.go +++ b/src/internal/connector/exchange/attachment.go @@ -11,7 +11,6 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" - "github.com/alcionai/corso/src/internal/connector/uploadsession" "github.com/alcionai/corso/src/pkg/logger" ) @@ -93,10 +92,12 @@ func uploadLargeAttachment( uploader attachmentUploadable, attachment models.Attachmentable, ) error { - var ( - bs = attachmentBytes(attachment) - size = int64(len(bs)) - ) + bs, err := GetAttachmentBytes(attachment) + if err != nil { + return clues.Stack(err).WithClues(ctx) + } + + size := int64(len(bs)) session, err := uploader.uploadSession(ctx, ptr.Val(attachment.GetName()), size) if err != nil { @@ -104,7 +105,7 @@ func uploadLargeAttachment( } url := ptr.Val(session.GetUploadUrl()) - aw := uploadsession.NewWriter(uploader.getItemID(), url, size) + aw := graph.NewLargeItemWriter(uploader.getItemID(), url, size) logger.Ctx(ctx).Debugw("uploading large attachment", "attachment_url", graph.LoggableURL(url)) // Upload the stream data diff --git a/src/internal/connector/exchange/attachment_uploadable.go b/src/internal/connector/exchange/attachment_uploadable.go index 0dc3ced1e..1423f56df 100644 --- a/src/internal/connector/exchange/attachment_uploadable.go +++ b/src/internal/connector/exchange/attachment_uploadable.go @@ -37,9 +37,12 @@ func (mau *mailAttachmentUploader) getItemID() string { func (mau *mailAttachmentUploader) uploadSmallAttachment(ctx context.Context, attach models.Attachmentable) error { _, err := mau.service.Client(). - UsersById(mau.userID). - MailFoldersById(mau.folderID). - MessagesById(mau.itemID). + Users(). + ByUserId(mau.userID). + MailFolders(). + ByMailFolderId(mau.folderID). + Messages(). + ByMessageId(mau.itemID). Attachments(). Post(ctx, attach, nil) if err != nil { @@ -60,9 +63,12 @@ func (mau *mailAttachmentUploader) uploadSession( r, err := mau. service. Client(). - UsersById(mau.userID). - MailFoldersById(mau.folderID). - MessagesById(mau.itemID). + Users(). + ByUserId(mau.userID). + MailFolders(). + ByMailFolderId(mau.folderID). + Messages(). + ByMessageId(mau.itemID). Attachments(). CreateUploadSession(). Post(ctx, session, nil) @@ -87,9 +93,12 @@ func (eau *eventAttachmentUploader) getItemID() string { func (eau *eventAttachmentUploader) uploadSmallAttachment(ctx context.Context, attach models.Attachmentable) error { _, err := eau.service.Client(). - UsersById(eau.userID). - CalendarsById(eau.calendarID). - EventsById(eau.itemID). + Users(). + ByUserId(eau.userID). + Calendars(). + ByCalendarId(eau.calendarID). + Events(). 
+ ByEventId(eau.itemID). Attachments(). Post(ctx, attach, nil) if err != nil { @@ -108,9 +117,12 @@ func (eau *eventAttachmentUploader) uploadSession( session.SetAttachmentItem(makeSessionAttachment(attachmentName, attachmentSize)) r, err := eau.service.Client(). - UsersById(eau.userID). - CalendarsById(eau.calendarID). - EventsById(eau.itemID). + Users(). + ByUserId(eau.userID). + Calendars(). + ByCalendarId(eau.calendarID). + Events(). + ByEventId(eau.itemID). Attachments(). CreateUploadSession(). Post(ctx, session, nil) diff --git a/src/internal/connector/exchange/container_resolver_test.go b/src/internal/connector/exchange/container_resolver_test.go index 572162263..de050d25a 100644 --- a/src/internal/connector/exchange/container_resolver_test.go +++ b/src/internal/connector/exchange/container_resolver_test.go @@ -549,7 +549,7 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() { var ( user = tester.M365UserID(suite.T()) directoryCaches = make(map[path.CategoryType]graph.ContainerResolver) - folderName = tester.DefaultTestRestoreDestination().ContainerName + folderName = tester.DefaultTestRestoreDestination("").ContainerName tests = []struct { name string pathFunc1 func(t *testing.T) path.Path diff --git a/src/internal/connector/exchange/data_collections.go b/src/internal/connector/exchange/data_collections.go index 734771de2..d07ee4300 100644 --- a/src/internal/connector/exchange/data_collections.go +++ b/src/internal/connector/exchange/data_collections.go @@ -6,8 +6,8 @@ import ( "github.com/alcionai/clues" - "github.com/alcionai/corso/src/internal/common" - "github.com/alcionai/corso/src/internal/connector/exchange/api" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -17,6 +17,7 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // MetadataFileNames produces the category-specific set of filenames used to @@ -40,7 +41,7 @@ func (dps DeltaPaths) AddDelta(k, d string) { dp = DeltaPath{} } - dp.delta = d + dp.Delta = d dps[k] = dp } @@ -50,13 +51,13 @@ func (dps DeltaPaths) AddPath(k, p string) { dp = DeltaPath{} } - dp.path = p + dp.Path = p dps[k] = dp } type DeltaPath struct { - delta string - path string + Delta string + Path string } // ParseMetadataCollections produces a map of structs holding delta @@ -147,7 +148,7 @@ func parseMetadataCollections( // complete backup on the next run. for _, dps := range cdp { for k, dp := range dps { - if len(dp.delta) == 0 || len(dp.path) == 0 { + if len(dp.Path) == 0 { delete(dps, k) } } @@ -163,14 +164,14 @@ func parseMetadataCollections( // Add iota to this call -> mail, contacts, calendar, etc. 
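// --- Illustrative sketch; not part of the changeset. DeltaPath's fields become
// exported (delta/path -> Delta/Path) and parseMetadataCollections now keeps any
// entry that still has a previous path, even when its delta token is empty. A toy
// example of the bookkeeping, with made-up folder IDs and paths; it assumes the
// exchange package context.
func exampleDeltaPaths() DeltaPaths {
	dps := DeltaPaths{}

	// Both a previous path and a delta token: the next backup can resume from
	// the token.
	dps.AddPath("folder-1", "tenant-id/exchange/user-id/email/folder-1")
	dps.AddDelta("folder-1", "delta-token-1")

	// Only a previous path: the entry is retained so the folder can still be
	// merged, but the next backup cannot resume from a token and has to
	// re-enumerate the folder.
	dps.AddPath("folder-2", "tenant-id/exchange/user-id/email/folder-2")

	return dps
}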
func DataCollections( ctx context.Context, - user common.IDNamer, selector selectors.Selector, + user idname.Provider, metadata []data.RestoreCollection, acct account.M365Config, su support.StatusUpdater, ctrlOpts control.Options, errs *fault.Bus, -) ([]data.BackupCollection, map[string]map[string]struct{}, error) { +) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) { eb, err := selector.ToExchangeBackup() if err != nil { return nil, nil, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx) @@ -182,6 +183,12 @@ func DataCollections( categories = map[path.CategoryType]struct{}{} ) + // Turn on concurrency limiter middleware for exchange backups + // unless explicitly disabled through DisableConcurrencyLimiterFN cli flag + if !ctrlOpts.ToggleFeatures.DisableConcurrencyLimiter { + graph.InitializeConcurrencyLimiter(ctrlOpts.Parallelism.ItemFetch) + } + cdps, err := parseMetadataCollections(ctx, metadata, errs) if err != nil { return nil, nil, err @@ -214,6 +221,7 @@ func DataCollections( if len(collections) > 0 { baseCols, err := graph.BaseCollections( ctx, + collections, acct.AzureTenantID, user.ID(), path.ExchangeService, @@ -249,7 +257,7 @@ func getterByType(ac api.Client, category path.CategoryType) (addedAndRemovedIte func createCollections( ctx context.Context, creds account.M365Config, - user common.IDNamer, + user idname.Provider, scope selectors.ExchangeScope, dps DeltaPaths, ctrlOpts control.Options, @@ -269,9 +277,6 @@ func createCollections( return nil, clues.Stack(err).WithClues(ctx) } - // Create collection of ExchangeDataCollection - collections := make(map[string]data.BackupCollection) - qp := graph.QueryParams{ Category: category, ResourceOwner: user, @@ -289,11 +294,10 @@ func createCollections( return nil, clues.Wrap(err, "populating container cache") } - err = filterContainersAndFillCollections( + collections, err := filterContainersAndFillCollections( ctx, qp, getter, - collections, su, resolver, scope, diff --git a/src/internal/connector/exchange/data_collections_test.go b/src/internal/connector/exchange/data_collections_test.go index d69948c69..ef34de5ff 100644 --- a/src/internal/connector/exchange/data_collections_test.go +++ b/src/internal/connector/exchange/data_collections_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -20,6 +20,7 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // --------------------------------------------------------------------------- @@ -67,7 +68,12 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { data: []fileValues{ {graph.PreviousPathFileName, "prev-path"}, }, - expect: map[string]DeltaPath{}, + expect: map[string]DeltaPath{ + "key": { + Delta: "delta-link", + Path: "prev-path", + }, + }, expectError: assert.NoError, }, { @@ -86,8 +92,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { }, expect: map[string]DeltaPath{ "key": { - delta: "delta-link", - path: "prev-path", + Delta: "delta-link", + Path: 
"prev-path", }, }, expectError: assert.NoError, @@ -107,7 +113,12 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { {graph.DeltaURLsFileName, ""}, {graph.PreviousPathFileName, "prev-path"}, }, - expect: map[string]DeltaPath{}, + expect: map[string]DeltaPath{ + "key": { + Delta: "delta-link", + Path: "prev-path", + }, + }, expectError: assert.NoError, }, { @@ -118,8 +129,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { }, expect: map[string]DeltaPath{ "key": { - delta: "`!@#$%^&*()_[]{}/\"\\", - path: "prev-path", + Delta: "`!@#$%^&*()_[]{}/\"\\", + Path: "prev-path", }, }, expectError: assert.NoError, @@ -132,8 +143,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { }, expect: map[string]DeltaPath{ "key": { - delta: "\\n\\r\\t\\b\\f\\v\\0\\\\", - path: "prev-path", + Delta: "\\n\\r\\t\\b\\f\\v\\0\\\\", + Path: "prev-path", }, }, expectError: assert.NoError, @@ -149,8 +160,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { }, expect: map[string]DeltaPath{ "key": { - delta: "\\n", - path: "prev-path", + Delta: "\\n", + Path: "prev-path", }, }, expectError: assert.NoError, @@ -190,8 +201,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { assert.Len(t, emails, len(test.expect)) for k, v := range emails { - assert.Equal(t, v.delta, emails[k].delta, "delta") - assert.Equal(t, v.path, emails[k].path, "path") + assert.Equal(t, v.Delta, emails[k].Delta, "delta") + assert.Equal(t, v.Path, emails[k].Path, "path") } }) } @@ -239,15 +250,15 @@ func (suite *DataCollectionsIntegrationSuite) TestMailFetch() { userID = tester.M365UserID(suite.T()) users = []string{userID} acct, err = tester.NewM365Account(suite.T()).M365Config() - ss = selectors.Selector{}.SetDiscreteOwnerIDName(userID, userID) ) require.NoError(suite.T(), err, clues.ToCore(err)) tests := []struct { - name string - scope selectors.ExchangeScope - folderNames map[string]struct{} + name string + scope selectors.ExchangeScope + folderNames map[string]struct{} + canMakeDeltaQueries bool }{ { name: "Folder Iterative Check Mail", @@ -258,6 +269,18 @@ func (suite *DataCollectionsIntegrationSuite) TestMailFetch() { folderNames: map[string]struct{}{ DefaultMailFolder: {}, }, + canMakeDeltaQueries: true, + }, + { + name: "Folder Iterative Check Mail Non-Delta", + scope: selectors.NewExchangeBackup(users).MailFolders( + []string{DefaultMailFolder}, + selectors.PrefixMatch(), + )[0], + folderNames: map[string]struct{}{ + DefaultMailFolder: {}, + }, + canMakeDeltaQueries: false, }, } @@ -265,13 +288,16 @@ func (suite *DataCollectionsIntegrationSuite) TestMailFetch() { suite.Run(test.name, func() { t := suite.T() + ctrlOpts := control.Defaults() + ctrlOpts.ToggleFeatures.DisableDelta = !test.canMakeDeltaQueries + collections, err := createCollections( ctx, acct, - ss, + inMock.NewProvider(userID, userID), test.scope, DeltaPaths{}, - control.Options{}, + ctrlOpts, func(status *support.ConnectorOperationStatus) {}, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -282,9 +308,18 @@ func (suite *DataCollectionsIntegrationSuite) TestMailFetch() { } require.NotEmpty(t, c.FullPath().Folder(false)) - folder := c.FullPath().Folder(false) - delete(test.folderNames, folder) + // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection + // interface. 
+ if !assert.Implements(t, (*data.LocationPather)(nil), c) { + continue + } + + loc := c.(data.LocationPather).LocationPath().String() + + require.NotEmpty(t, loc) + + delete(test.folderNames, loc) } assert.Empty(t, test.folderNames) @@ -300,7 +335,6 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() { userID = tester.M365UserID(suite.T()) users = []string{userID} acct, err = tester.NewM365Account(suite.T()).M365Config() - ss = selectors.Selector{}.SetDiscreteOwnerIDName(userID, userID) ) require.NoError(suite.T(), err, clues.ToCore(err)) @@ -339,10 +373,10 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() { collections, err := createCollections( ctx, acct, - ss, + inMock.NewProvider(userID, userID), test.scope, DeltaPaths{}, - control.Options{}, + control.Defaults(), func(status *support.ConnectorOperationStatus) {}, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -370,10 +404,10 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() { collections, err = createCollections( ctx, acct, - ss, + inMock.NewProvider(userID, userID), test.scope, dps, - control.Options{}, + control.Defaults(), func(status *support.ConnectorOperationStatus) {}, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -405,7 +439,6 @@ func (suite *DataCollectionsIntegrationSuite) TestMailSerializationRegression() t = suite.T() wg sync.WaitGroup users = []string{suite.user} - ss = selectors.Selector{}.SetDiscreteOwnerIDName(suite.user, suite.user) ) acct, err := tester.NewM365Account(t).M365Config() @@ -417,10 +450,10 @@ func (suite *DataCollectionsIntegrationSuite) TestMailSerializationRegression() collections, err := createCollections( ctx, acct, - ss, + inMock.NewProvider(suite.user, suite.user), sel.Scopes()[0], DeltaPaths{}, - control.Options{}, + control.Defaults(), newStatusUpdater(t, &wg), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -467,7 +500,6 @@ func (suite *DataCollectionsIntegrationSuite) TestContactSerializationRegression require.NoError(suite.T(), err, clues.ToCore(err)) users := []string{suite.user} - ss := selectors.Selector{}.SetDiscreteOwnerIDName(suite.user, suite.user) tests := []struct { name string @@ -491,10 +523,10 @@ func (suite *DataCollectionsIntegrationSuite) TestContactSerializationRegression edcs, err := createCollections( ctx, acct, - ss, + inMock.NewProvider(suite.user, suite.user), test.scope, DeltaPaths{}, - control.Options{}, + control.Defaults(), newStatusUpdater(t, &wg), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -528,7 +560,16 @@ func (suite *DataCollectionsIntegrationSuite) TestContactSerializationRegression continue } - assert.Equal(t, edc.FullPath().Folder(false), DefaultContactFolder) + // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection + // interface. 
+ if !assert.Implements(t, (*data.LocationPather)(nil), edc) { + continue + } + + assert.Equal( + t, + edc.(data.LocationPather).LocationPath().String(), + DefaultContactFolder) assert.NotZero(t, count) } @@ -556,8 +597,6 @@ func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression( bdayID string ) - ss := selectors.Selector{}.SetDiscreteOwnerIDName(suite.user, suite.user) - fn := func(gcf graph.CacheFolder) error { if ptr.Val(gcf.GetDisplayName()) == DefaultCalendar { calID = ptr.Val(gcf.GetId()) @@ -605,10 +644,10 @@ func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression( collections, err := createCollections( ctx, acct, - ss, + inMock.NewProvider(suite.user, suite.user), test.scope, DeltaPaths{}, - control.Options{}, + control.Defaults(), newStatusUpdater(t, &wg), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) diff --git a/src/internal/connector/exchange/exchange_data_collection.go b/src/internal/connector/exchange/exchange_data_collection.go index 2c45175da..441056ed6 100644 --- a/src/internal/connector/exchange/exchange_data_collection.go +++ b/src/internal/connector/exchange/exchange_data_collection.go @@ -35,10 +35,6 @@ var ( const ( collectionChannelBufferSize = 1000 numberOfRetries = 4 - - // Outlooks expects max 4 concurrent requests - // https://learn.microsoft.com/en-us/graph/throttling-limits#outlook-service-limits - urlPrefetchChannelBufferSize = 4 ) type itemer interface { @@ -186,8 +182,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { colProgress, closer = observe.CollectionProgress( ctx, col.fullPath.Category().String(), - // TODO(keepers): conceal compliance in path, drop Hide() - clues.Hide(col.fullPath.Folder(false))) + col.LocationPath().Elements()) go closer() @@ -196,22 +191,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { }() } - // Limit the max number of active requests to GC - fetchParallelism := col.ctrl.ItemFetchParallelism - if fetchParallelism < 1 || fetchParallelism > urlPrefetchChannelBufferSize { - fetchParallelism = urlPrefetchChannelBufferSize - logger.Ctx(ctx).Infow( - "fetch parallelism value not set or out of bounds, using default", - "default_parallelism", - urlPrefetchChannelBufferSize, - "requested_parallellism", - col.ctrl.ItemFetchParallelism, - ) - } - - logger.Ctx(ctx).Infow("fetching data with parallelism", "fetch_parallelism", fetchParallelism) - - semaphoreCh := make(chan struct{}, fetchParallelism) + semaphoreCh := make(chan struct{}, col.ctrl.Parallelism.ItemFetch) defer close(semaphoreCh) // delete all removed items @@ -280,7 +260,12 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { return } - info.Size = int64(len(data)) + // In case of mail the size of data is calc as- size of body content+size of attachment + // in all other case the size is - total item's serialized size + if info.Size <= 0 { + info.Size = int64(len(data)) + } + info.ParentPath = col.locationPath.String() col.data <- &Stream{ diff --git a/src/internal/connector/exchange/exchange_data_collection_test.go b/src/internal/connector/exchange/exchange_data_collection_test.go index 94d08fcef..ae9a7d3ce 100644 --- a/src/internal/connector/exchange/exchange_data_collection_test.go +++ b/src/internal/connector/exchange/exchange_data_collection_test.go @@ -179,7 +179,7 @@ func (suite *ExchangeDataCollectionSuite) TestNewCollection_state() { test.curr, test.prev, test.loc, 0, &mockItemer{}, nil, - control.Options{}, + 
control.Defaults(), false) assert.Equal(t, test.expect, c.State(), "collection state") assert.Equal(t, test.curr, c.fullPath, "full path") diff --git a/src/internal/connector/exchange/folder_resolver_test.go b/src/internal/connector/exchange/folder_resolver_test.go index 76c32a4fd..69e78229d 100644 --- a/src/internal/connector/exchange/folder_resolver_test.go +++ b/src/internal/connector/exchange/folder_resolver_test.go @@ -8,11 +8,11 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type CacheResolverSuite struct { diff --git a/src/internal/connector/exchange/mail_folder_cache_test.go b/src/internal/connector/exchange/mail_folder_cache_test.go index 0ae08ce88..46132125b 100644 --- a/src/internal/connector/exchange/mail_folder_cache_test.go +++ b/src/internal/connector/exchange/mail_folder_cache_test.go @@ -9,10 +9,10 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) const ( diff --git a/src/internal/connector/exchange/mock/event.go b/src/internal/connector/exchange/mock/event.go index 560358e4f..7df667af6 100644 --- a/src/internal/connector/exchange/mock/event.go +++ b/src/internal/connector/exchange/mock/event.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" ) // Order of fields to fill in: @@ -221,8 +221,8 @@ func EventBytes(subject string) []byte { func EventWithSubjectBytes(subject string) []byte { tomorrow := time.Now().UTC().AddDate(0, 0, 1) at := time.Date(tomorrow.Year(), tomorrow.Month(), tomorrow.Day(), tomorrow.Hour(), 0, 0, 0, time.UTC) - atTime := common.FormatTime(at) - endTime := common.FormatTime(at.Add(30 * time.Minute)) + atTime := dttm.Format(at) + endTime := dttm.Format(at.Add(30 * time.Minute)) return EventWith( defaultEventOrganizer, subject, @@ -234,7 +234,7 @@ func EventWithSubjectBytes(subject string) []byte { func EventWithAttachment(subject string) []byte { tomorrow := time.Now().UTC().AddDate(0, 0, 1) at := time.Date(tomorrow.Year(), tomorrow.Month(), tomorrow.Day(), tomorrow.Hour(), 0, 0, 0, time.UTC) - atTime := common.FormatTime(at) + atTime := dttm.Format(at) return EventWith( defaultEventOrganizer, subject, @@ -246,7 +246,7 @@ func EventWithAttachment(subject string) []byte { func EventWithRecurrenceBytes(subject, recurrenceTimeZone string) []byte { tomorrow := time.Now().UTC().AddDate(0, 0, 1) at := time.Date(tomorrow.Year(), tomorrow.Month(), tomorrow.Day(), tomorrow.Hour(), 0, 0, 0, time.UTC) - atTime := common.FormatTime(at) + atTime := dttm.Format(at) timeSlice := strings.Split(atTime, "T") recurrence := string(fmt.Sprintf( @@ -265,7 +265,7 @@ func EventWithRecurrenceBytes(subject, recurrenceTimeZone string) []byte { func EventWithAttendeesBytes(subject string) []byte { tomorrow := time.Now().UTC().AddDate(0, 0, 1) at := time.Date(tomorrow.Year(), tomorrow.Month(), tomorrow.Day(), tomorrow.Hour(), 0, 0, 0, time.UTC) - 
atTime := common.FormatTime(at) + atTime := dttm.Format(at) return EventWith( defaultEventOrganizer, subject, diff --git a/src/internal/connector/exchange/mock/mail.go b/src/internal/connector/exchange/mock/mail.go index 32b2bc04f..cb6e296d1 100644 --- a/src/internal/connector/exchange/mock/mail.go +++ b/src/internal/connector/exchange/mock/mail.go @@ -11,7 +11,7 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/stretchr/testify/require" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" ) //nolint:lll @@ -107,7 +107,7 @@ const ( // Contents verified as working with sample data from kiota-serialization-json-go v0.5.5 func MessageBytes(subject string) []byte { return MessageWithBodyBytes( - "TPS Report "+subject+" "+common.FormatNow(common.SimpleDateTime), + "TPS Report "+subject+" "+dttm.FormatNow(dttm.HumanReadable), defaultMessageBody, defaultMessagePreview) } diff --git a/src/internal/connector/exchange/mock/mock_test.go b/src/internal/connector/exchange/mock/mock_test.go index aab25e055..d1b560173 100644 --- a/src/internal/connector/exchange/mock/mock_test.go +++ b/src/internal/connector/exchange/mock/mock_test.go @@ -67,9 +67,9 @@ func (suite *MockSuite) TestMockExchangeCollection_NewExchangeCollectionMail_Hyd t := suite.T() mdc := NewCollection(nil, nil, 3) - buf := &bytes.Buffer{} for stream := range mdc.Items(ctx, fault.New(true)) { + buf := &bytes.Buffer{} _, err := buf.ReadFrom(stream.ToReader()) assert.NoError(t, err, clues.ToCore(err)) diff --git a/src/internal/connector/exchange/restore_test.go b/src/internal/connector/exchange/restore_test.go index 1e298e754..de67ab8e6 100644 --- a/src/internal/connector/exchange/restore_test.go +++ b/src/internal/connector/exchange/restore_test.go @@ -3,16 +3,13 @@ package exchange import ( "context" "testing" - "time" "github.com/alcionai/clues" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/exchange/api" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/tester" @@ -20,6 +17,7 @@ import ( "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type ExchangeRestoreSuite struct { @@ -67,8 +65,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreContact() { var ( t = suite.T() userID = tester.M365UserID(t) - now = time.Now() - folderName = "TestRestoreContact: " + common.FormatSimpleDateTime(now) + folderName = tester.DefaultTestRestoreDestination("contact").ContainerName ) aFolder, err := suite.ac.Contacts().CreateContactFolder(ctx, userID, folderName) @@ -102,7 +99,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreEvent() { var ( t = suite.T() userID = tester.M365UserID(t) - subject = "TestRestoreEvent: " + common.FormatSimpleDateTime(time.Now()) + subject = tester.DefaultTestRestoreDestination("event").ContainerName ) calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, subject) @@ -172,7 +169,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { } userID := tester.M365UserID(suite.T()) - now := time.Now() + tests := []struct { name string bytes []byte @@ -184,7 +181,7 @@ func 
(suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.MessageBytes("Restore Exchange Object"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreMailObject: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailobj").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -196,7 +193,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.MessageWithDirectAttachment("Restore 1 Attachment"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreMailwithAttachment: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailwattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -208,7 +205,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.MessageWithItemAttachmentEvent("Event Item Attachment"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreEventItemAttachment: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("eventwattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -220,7 +217,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.MessageWithItemAttachmentMail("Mail Item Attachment"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreMailItemAttachment: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailitemattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -235,7 +232,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { ), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreMailBasicItemAttachment: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailbasicattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -250,7 +247,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { ), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "ItemMailAttachmentwAttachment " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailnestattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -265,7 +262,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { ), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "ItemMailAttachment_Contact " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailcontactattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -277,7 +274,7 @@ func (suite *ExchangeRestoreSuite) 
TestRestoreExchangeObject() { bytes: exchMock.MessageWithNestedItemAttachmentEvent("Nested Item Attachment"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreNestedEventItemAttachment: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("nestedattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -289,7 +286,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.MessageWithLargeAttachment("Restore Large Attachment"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreMailwithLargeAttachment: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("maillargeattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -301,7 +298,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.MessageWithTwoAttachments("Restore 2 Attachments"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreMailwithAttachments: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailtwoattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -313,7 +310,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.MessageWithOneDriveAttachment("Restore Reference(OneDrive) Attachment"), category: path.EmailCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreMailwithReferenceAttachment: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("mailrefattch").ContainerName folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -326,7 +323,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.ContactBytes("Test_Omega"), category: path.ContactsCategory, destination: func(t *testing.T, ctx context.Context) string { - folderName := "TestRestoreContactObject: " + common.FormatSimpleDateTime(now) + folderName := tester.DefaultTestRestoreDestination("contact").ContainerName folder, err := suite.ac.Contacts().CreateContactFolder(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) @@ -338,8 +335,8 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.EventBytes("Restored Event Object"), category: path.EventsCategory, destination: func(t *testing.T, ctx context.Context) string { - calendarName := "TestRestoreEventObject: " + common.FormatSimpleDateTime(now) - calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, calendarName) + folderName := tester.DefaultTestRestoreDestination("event").ContainerName + calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) return ptr.Val(calendar.GetId()) @@ -350,8 +347,8 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { bytes: exchMock.EventWithAttachment("Restored Event Attachment"), category: path.EventsCategory, destination: func(t *testing.T, ctx context.Context) string { - calendarName := "TestRestoreEventObject_" + 
common.FormatSimpleDateTime(now) - calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, calendarName) + folderName := tester.DefaultTestRestoreDestination("eventobj").ContainerName + calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, folderName) require.NoError(t, err, clues.ToCore(err)) return ptr.Val(calendar.GetId()) diff --git a/src/internal/connector/exchange/service_functions.go b/src/internal/connector/exchange/service_functions.go index cad25cdd8..bde0f1ae4 100644 --- a/src/internal/connector/exchange/service_functions.go +++ b/src/internal/connector/exchange/service_functions.go @@ -6,13 +6,13 @@ import ( "github.com/alcionai/clues" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) var ErrFolderNotFound = clues.New("folder not found") @@ -137,21 +137,15 @@ func includeContainer( directory = locPath.Folder(false) } - var ( - ok bool - pathRes path.Path - ) + var ok bool switch category { case path.EmailCategory: ok = scope.Matches(selectors.ExchangeMailFolder, directory) - pathRes = locPath case path.ContactsCategory: ok = scope.Matches(selectors.ExchangeContactFolder, directory) - pathRes = locPath case path.EventsCategory: ok = scope.Matches(selectors.ExchangeEventCalendar, directory) - pathRes = dirPath default: return nil, nil, false } @@ -162,5 +156,5 @@ func includeContainer( "matches_input", directory, ).Debug("backup folder selection filter") - return pathRes, loc, ok + return dirPath, loc, ok } diff --git a/src/internal/connector/exchange/service_iterators.go b/src/internal/connector/exchange/service_iterators.go index dc2fa42ca..3d96cf4b5 100644 --- a/src/internal/connector/exchange/service_iterators.go +++ b/src/internal/connector/exchange/service_iterators.go @@ -7,7 +7,6 @@ import ( "github.com/alcionai/corso/src/internal/common/pii" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -16,6 +15,7 @@ import ( "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type addedAndRemovedItemIDsGetter interface { @@ -23,6 +23,7 @@ type addedAndRemovedItemIDsGetter interface { ctx context.Context, user, containerID, oldDeltaToken string, immutableIDs bool, + canMakeDeltaQueries bool, ) ([]string, []string, api.DeltaUpdate, error) } @@ -31,19 +32,24 @@ type addedAndRemovedItemIDsGetter interface { // into a BackupCollection. Messages outside of those directories are omitted. // @param collection is filled with during this function. // Supports all exchange applications: Contacts, Events, and Mail +// +// TODO(ashmrtn): This should really return []data.BackupCollection but +// unfortunately some of our tests rely on being able to lookup returned +// collections by ID and it would be non-trivial to change them. 
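// --- Illustrative sketch; not part of the changeset. With this change the
// function returns the map it previously filled in place: one BackupCollection
// per container ID, plus a "metadata" entry holding the delta/path bookkeeping.
// A hypothetical caller separating the two (the tests below instead detect the
// metadata collection via path.ExchangeMetadataService):
func exampleSplitCollections(
	colls map[string]data.BackupCollection,
) (data.BackupCollection, []data.BackupCollection) {
	var (
		metadata data.BackupCollection
		rest     []data.BackupCollection
	)

	for id, c := range colls {
		if id == "metadata" {
			metadata = c
			continue
		}

		rest = append(rest, c)
	}

	return metadata, rest
}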
func filterContainersAndFillCollections( ctx context.Context, qp graph.QueryParams, getter addedAndRemovedItemIDsGetter, - collections map[string]data.BackupCollection, statusUpdater support.StatusUpdater, resolver graph.ContainerResolver, scope selectors.ExchangeScope, dps DeltaPaths, ctrlOpts control.Options, errs *fault.Bus, -) error { +) (map[string]data.BackupCollection, error) { var ( + // folder ID -> BackupCollection. + collections = map[string]data.BackupCollection{} // folder ID -> delta url or folder path lookups deltaURLs = map[string]string{} currPaths = map[string]string{} @@ -60,19 +66,19 @@ func filterContainersAndFillCollections( // But this will work for the short term. ac, err := api.NewClient(qp.Credentials) if err != nil { - return err + return nil, err } ibt, err := itemerByType(ac, category) if err != nil { - return err + return nil, err } el := errs.Local() for _, c := range resolver.Items() { if el.Failure() != nil { - return el.Failure() + return nil, el.Failure() } cID := ptr.Val(c.GetId()) @@ -80,8 +86,8 @@ func filterContainersAndFillCollections( var ( dp = dps[cID] - prevDelta = dp.delta - prevPathStr = dp.path // do not log: pii; log prevPath instead + prevDelta = dp.Delta + prevPathStr = dp.Path // do not log: pii; log prevPath instead prevPath path.Path ictx = clues.Add( ctx, @@ -114,7 +120,8 @@ func filterContainersAndFillCollections( qp.ResourceOwner.ID(), cID, prevDelta, - ctrlOpts.ToggleFeatures.ExchangeImmutableIDs) + ctrlOpts.ToggleFeatures.ExchangeImmutableIDs, + !ctrlOpts.ToggleFeatures.DisableDelta) if err != nil { if !graph.IsErrDeletedInFlight(err) { el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) @@ -171,7 +178,7 @@ func filterContainersAndFillCollections( // resolver (which contains all the resource owners' current containers). for id, p := range tombstones { if el.Failure() != nil { - return el.Failure() + return nil, el.Failure() } ictx := clues.Add(ctx, "tombstone_id", id) @@ -223,12 +230,12 @@ func filterContainersAndFillCollections( }, statusUpdater) if err != nil { - return clues.Wrap(err, "making metadata collection") + return nil, clues.Wrap(err, "making metadata collection") } collections["metadata"] = col - return el.Failure() + return collections, el.Failure() } // produces a set of id:path pairs from the deltapaths map. 
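// --- Illustrative sketch; not part of the changeset. makeTombstones (adjusted
// below for the exported Path field) flattens the previous backup's DeltaPaths
// into id -> previous-path pairs; containers that no longer appear in the
// resolver can then be emitted as deleted collections, as exercised by the
// "add 1 remove 2" case in the tests below. Folder IDs and paths here are
// made up.
func exampleTombstones() map[string]string {
	dps := DeltaPaths{
		"folder-1": DeltaPath{
			Delta: "delta-token-1",
			Path:  "tenant-id/exchange/user-id/email/folder-1",
		},
		"folder-2": DeltaPath{
			Path: "tenant-id/exchange/user-id/email/folder-2",
		},
	}

	// Result maps each folder ID to its previous path:
	// {"folder-1": ".../folder-1", "folder-2": ".../folder-2"}
	return makeTombstones(dps)
}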
@@ -238,7 +245,7 @@ func makeTombstones(dps DeltaPaths) map[string]string { r := make(map[string]string, len(dps)) for id, v := range dps { - r[id] = v.path + r[id] = v.Path } return r diff --git a/src/internal/connector/exchange/service_iterators_test.go b/src/internal/connector/exchange/service_iterators_test.go index a752883d1..a3b35c7f4 100644 --- a/src/internal/connector/exchange/service_iterators_test.go +++ b/src/internal/connector/exchange/service_iterators_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -20,6 +20,7 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // --------------------------------------------------------------------------- @@ -29,7 +30,10 @@ import ( var _ addedAndRemovedItemIDsGetter = &mockGetter{} type ( - mockGetter map[string]mockGetterResults + mockGetter struct { + noReturnDelta bool + results map[string]mockGetterResults + } mockGetterResults struct { added []string removed []string @@ -42,18 +46,24 @@ func (mg mockGetter) GetAddedAndRemovedItemIDs( ctx context.Context, userID, cID, prevDelta string, _ bool, + _ bool, ) ( []string, []string, api.DeltaUpdate, error, ) { - results, ok := mg[cID] + results, ok := mg.results[cID] if !ok { return nil, nil, api.DeltaUpdate{}, clues.New("mock not found for " + cID) } - return results.added, results.removed, results.newDelta, results.err + delta := results.newDelta + if mg.noReturnDelta { + delta.URL = "" + } + + return results.added, results.removed, delta, results.err } var _ graph.ContainerResolver = &mockResolver{} @@ -117,12 +127,10 @@ func (suite *ServiceIteratorsSuite) SetupSuite() { } func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { - ss := selectors.Selector{}.SetDiscreteOwnerIDName("user_id", "user_id") - var ( qp = graph.QueryParams{ Category: path.EmailCategory, // doesn't matter which one we use. 
- ResourceOwner: ss, + ResourceOwner: inMock.NewProvider("user_id", "user_name"), Credentials: suite.creds, } statusUpdater = func(*support.ConnectorOperationStatus) {} @@ -172,8 +180,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }{ { name: "happy path, one container", - getter: map[string]mockGetterResults{ - "1": commonResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResult, + }, }, resolver: newMockResolver(container1), scope: allScope, @@ -183,21 +193,11 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "happy path, many containers", - getter: map[string]mockGetterResults{ - "1": commonResult, - "2": commonResult, - }, - resolver: newMockResolver(container1, container2), - scope: allScope, - expectErr: assert.NoError, - expectNewColls: 2, - expectMetadataColls: 1, - }, - { - name: "happy path, many containers, same display name", - getter: map[string]mockGetterResults{ - "1": commonResult, - "2": commonResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResult, + "2": commonResult, + }, }, resolver: newMockResolver(container1, container2), scope: allScope, @@ -207,9 +207,11 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "no containers pass scope", - getter: map[string]mockGetterResults{ - "1": commonResult, - "2": commonResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResult, + "2": commonResult, + }, }, resolver: newMockResolver(container1, container2), scope: selectors.NewExchangeBackup(nil).MailFolders(selectors.None())[0], @@ -219,8 +221,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "err: deleted in flight", - getter: map[string]mockGetterResults{ - "1": deletedInFlightResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": deletedInFlightResult, + }, }, resolver: newMockResolver(container1), scope: allScope, @@ -231,8 +235,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "err: other error", - getter: map[string]mockGetterResults{ - "1": errorResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": errorResult, + }, }, resolver: newMockResolver(container1), scope: allScope, @@ -242,9 +248,11 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "half collections error: deleted in flight", - getter: map[string]mockGetterResults{ - "1": deletedInFlightResult, - "2": commonResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": deletedInFlightResult, + "2": commonResult, + }, }, resolver: newMockResolver(container1, container2), scope: allScope, @@ -255,9 +263,11 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "half collections error: other error", - getter: map[string]mockGetterResults{ - "1": errorResult, - "2": commonResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": errorResult, + "2": commonResult, + }, }, resolver: newMockResolver(container1, container2), scope: allScope, @@ -267,9 +277,11 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "half collections error: deleted in flight, fail fast", - getter: map[string]mockGetterResults{ - "1": deletedInFlightResult, - "2": commonResult, + getter: mockGetter{ + results: 
map[string]mockGetterResults{ + "1": deletedInFlightResult, + "2": commonResult, + }, }, resolver: newMockResolver(container1, container2), scope: allScope, @@ -281,9 +293,11 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, { name: "half collections error: other error, fail fast", - getter: map[string]mockGetterResults{ - "1": errorResult, - "2": commonResult, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": errorResult, + "2": commonResult, + }, }, resolver: newMockResolver(container1, container2), scope: allScope, @@ -294,78 +308,420 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { }, } for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() + for _, canMakeDeltaQueries := range []bool{true, false} { + name := test.name - ctx, flush := tester.NewContext() - defer flush() - - collections := map[string]data.BackupCollection{} - - err := filterContainersAndFillCollections( - ctx, - qp, - test.getter, - collections, - statusUpdater, - test.resolver, - test.scope, - dps, - control.Options{FailureHandling: test.failFast}, - fault.New(test.failFast == control.FailFast)) - test.expectErr(t, err, clues.ToCore(err)) - - // collection assertions - - deleteds, news, metadatas, doNotMerges := 0, 0, 0, 0 - for _, c := range collections { - if c.FullPath().Service() == path.ExchangeMetadataService { - metadatas++ - continue - } - - if c.State() == data.DeletedState { - deleteds++ - } - - if c.State() == data.NewState { - news++ - } - - if c.DoNotMergeItems() { - doNotMerges++ - } + if canMakeDeltaQueries { + name += "-delta" + } else { + name += "-non-delta" } - assert.Zero(t, deleteds, "deleted collections") - assert.Equal(t, test.expectNewColls, news, "new collections") - assert.Equal(t, test.expectMetadataColls, metadatas, "metadata collections") - assert.Equal(t, test.expectDoNotMergeColls, doNotMerges, "doNotMerge collections") + suite.Run(name, func() { + t := suite.T() - // items in collections assertions - for k, expect := range test.getter { - coll := collections[k] + ctx, flush := tester.NewContext() + defer flush() - if coll == nil { - continue - } + ctrlOpts := control.Options{FailureHandling: test.failFast} + ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries - exColl, ok := coll.(*Collection) - require.True(t, ok, "collection is an *exchange.Collection") + collections, err := filterContainersAndFillCollections( + ctx, + qp, + test.getter, + statusUpdater, + test.resolver, + test.scope, + dps, + ctrlOpts, + fault.New(test.failFast == control.FailFast)) + test.expectErr(t, err, clues.ToCore(err)) - ids := [][]string{ - make([]string, 0, len(exColl.added)), - make([]string, 0, len(exColl.removed)), - } + // collection assertions - for i, cIDs := range []map[string]struct{}{exColl.added, exColl.removed} { - for id := range cIDs { - ids[i] = append(ids[i], id) + deleteds, news, metadatas, doNotMerges := 0, 0, 0, 0 + for _, c := range collections { + if c.FullPath().Service() == path.ExchangeMetadataService { + metadatas++ + continue + } + + if c.State() == data.DeletedState { + deleteds++ + } + + if c.State() == data.NewState { + news++ + } + + if c.DoNotMergeItems() { + doNotMerges++ } } - assert.ElementsMatch(t, expect.added, ids[0], "added items") - assert.ElementsMatch(t, expect.removed, ids[1], "removed items") + assert.Zero(t, deleteds, "deleted collections") + assert.Equal(t, test.expectNewColls, news, "new collections") + assert.Equal(t, test.expectMetadataColls, 
metadatas, "metadata collections") + assert.Equal(t, test.expectDoNotMergeColls, doNotMerges, "doNotMerge collections") + + // items in collections assertions + for k, expect := range test.getter.results { + coll := collections[k] + + if coll == nil { + continue + } + + exColl, ok := coll.(*Collection) + require.True(t, ok, "collection is an *exchange.Collection") + + ids := [][]string{ + make([]string, 0, len(exColl.added)), + make([]string, 0, len(exColl.removed)), + } + + for i, cIDs := range []map[string]struct{}{exColl.added, exColl.removed} { + for id := range cIDs { + ids[i] = append(ids[i], id) + } + } + + assert.ElementsMatch(t, expect.added, ids[0], "added items") + assert.ElementsMatch(t, expect.removed, ids[1], "removed items") + } + }) + } + } +} + +func checkMetadata( + t *testing.T, + ctx context.Context, //revive:disable-line:context-as-argument + cat path.CategoryType, + expect DeltaPaths, + c data.BackupCollection, +) { + catPaths, err := parseMetadataCollections( + ctx, + []data.RestoreCollection{data.NotFoundRestoreCollection{Collection: c}}, + fault.New(true)) + if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) { + return + } + + assert.Equal(t, expect, catPaths[cat]) +} + +func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_DuplicateFolders() { + type scopeCat struct { + scope selectors.ExchangeScope + cat path.CategoryType + } + + var ( + qp = graph.QueryParams{ + ResourceOwner: inMock.NewProvider("user_id", "user_name"), + Credentials: suite.creds, + } + + statusUpdater = func(*support.ConnectorOperationStatus) {} + + dataTypes = []scopeCat{ + { + scope: selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0], + cat: path.EmailCategory, + }, + { + scope: selectors.NewExchangeBackup(nil).ContactFolders(selectors.Any())[0], + cat: path.ContactsCategory, + }, + { + scope: selectors.NewExchangeBackup(nil).EventCalendars(selectors.Any())[0], + cat: path.EventsCategory, + }, + } + + location = path.Builder{}.Append("foo", "bar") + + result1 = mockGetterResults{ + added: []string{"a1", "a2", "a3"}, + removed: []string{"r1", "r2", "r3"}, + newDelta: api.DeltaUpdate{URL: "delta_url"}, + } + result2 = mockGetterResults{ + added: []string{"a4", "a5", "a6"}, + removed: []string{"r4", "r5", "r6"}, + newDelta: api.DeltaUpdate{URL: "delta_url2"}, + } + + container1 = mockContainer{ + id: strPtr("1"), + displayName: strPtr("bar"), + p: path.Builder{}.Append("1"), + l: location, + } + container2 = mockContainer{ + id: strPtr("2"), + displayName: strPtr("bar"), + p: path.Builder{}.Append("2"), + l: location, + } + ) + + oldPath1 := func(t *testing.T, cat path.CategoryType) path.Path { + res, err := location.Append("1").ToDataLayerPath( + suite.creds.AzureTenantID, + qp.ResourceOwner.ID(), + path.ExchangeService, + cat, + false) + require.NoError(t, err, clues.ToCore(err)) + + return res + } + + oldPath2 := func(t *testing.T, cat path.CategoryType) path.Path { + res, err := location.Append("2").ToDataLayerPath( + suite.creds.AzureTenantID, + qp.ResourceOwner.ID(), + path.ExchangeService, + cat, + false) + require.NoError(t, err, clues.ToCore(err)) + + return res + } + + idPath1 := func(t *testing.T, cat path.CategoryType) path.Path { + res, err := path.Builder{}.Append("1").ToDataLayerPath( + suite.creds.AzureTenantID, + qp.ResourceOwner.ID(), + path.ExchangeService, + cat, + false) + require.NoError(t, err, clues.ToCore(err)) + + return res + } + + idPath2 := func(t *testing.T, cat path.CategoryType) path.Path { + res, err := 
path.Builder{}.Append("2").ToDataLayerPath( + suite.creds.AzureTenantID, + qp.ResourceOwner.ID(), + path.ExchangeService, + cat, + false) + require.NoError(t, err, clues.ToCore(err)) + + return res + } + + table := []struct { + name string + getter mockGetter + resolver graph.ContainerResolver + inputMetadata func(t *testing.T, cat path.CategoryType) DeltaPaths + expectNewColls int + expectDeleted int + expectMetadata func(t *testing.T, cat path.CategoryType) DeltaPaths + }{ + { + name: "1 moved to duplicate", + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": result1, + "2": result2, + }, + }, + resolver: newMockResolver(container1, container2), + inputMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{ + "1": DeltaPath{ + Delta: "old_delta", + Path: oldPath1(t, cat).String(), + }, + "2": DeltaPath{ + Delta: "old_delta", + Path: idPath2(t, cat).String(), + }, + } + }, + expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{ + "1": DeltaPath{ + Delta: "delta_url", + Path: idPath1(t, cat).String(), + }, + "2": DeltaPath{ + Delta: "delta_url2", + Path: idPath2(t, cat).String(), + }, + } + }, + }, + { + name: "both move to duplicate", + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": result1, + "2": result2, + }, + }, + resolver: newMockResolver(container1, container2), + inputMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{ + "1": DeltaPath{ + Delta: "old_delta", + Path: oldPath1(t, cat).String(), + }, + "2": DeltaPath{ + Delta: "old_delta", + Path: oldPath2(t, cat).String(), + }, + } + }, + expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{ + "1": DeltaPath{ + Delta: "delta_url", + Path: idPath1(t, cat).String(), + }, + "2": DeltaPath{ + Delta: "delta_url2", + Path: idPath2(t, cat).String(), + }, + } + }, + }, + { + name: "both new", + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": result1, + "2": result2, + }, + }, + resolver: newMockResolver(container1, container2), + inputMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{} + }, + expectNewColls: 2, + expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{ + "1": DeltaPath{ + Delta: "delta_url", + Path: idPath1(t, cat).String(), + }, + "2": DeltaPath{ + Delta: "delta_url2", + Path: idPath2(t, cat).String(), + }, + } + }, + }, + { + name: "add 1 remove 2", + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": result1, + }, + }, + resolver: newMockResolver(container1), + inputMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{ + "2": DeltaPath{ + Delta: "old_delta", + Path: idPath2(t, cat).String(), + }, + } + }, + expectNewColls: 1, + expectDeleted: 1, + expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths { + return DeltaPaths{ + "1": DeltaPath{ + Delta: "delta_url", + Path: idPath1(t, cat).String(), + }, + } + }, + }, + } + + for _, sc := range dataTypes { + suite.Run(sc.cat.String(), func() { + qp.Category = sc.cat + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext() + defer flush() + + collections, err := filterContainersAndFillCollections( + ctx, + qp, + test.getter, + statusUpdater, + test.resolver, + sc.scope, + test.inputMetadata(t, qp.Category), + control.Options{FailureHandling: control.FailFast}, + fault.New(true)) + 
require.NoError(t, err, "getting collections", clues.ToCore(err)) + + // collection assertions + + deleteds, news, metadatas := 0, 0, 0 + for _, c := range collections { + if c.State() == data.DeletedState { + deleteds++ + continue + } + + if c.FullPath().Service() == path.ExchangeMetadataService { + metadatas++ + checkMetadata(t, ctx, qp.Category, test.expectMetadata(t, qp.Category), c) + continue + } + + if c.State() == data.NewState { + news++ + } + } + + assert.Equal(t, test.expectDeleted, deleteds, "deleted collections") + assert.Equal(t, test.expectNewColls, news, "new collections") + assert.Equal(t, 1, metadatas, "metadata collections") + + // items in collections assertions + for k, expect := range test.getter.results { + coll := collections[k] + + if coll == nil { + continue + } + + exColl, ok := coll.(*Collection) + require.True(t, ok, "collection is an *exchange.Collection") + + ids := [][]string{ + make([]string, 0, len(exColl.added)), + make([]string, 0, len(exColl.removed)), + } + + for i, cIDs := range []map[string]struct{}{exColl.added, exColl.removed} { + for id := range cIDs { + ids[i] = append(ids[i], id) + } + } + + assert.ElementsMatch(t, expect.added, ids[0], "added items") + assert.ElementsMatch(t, expect.removed, ids[1], "removed items") + } + }) } }) } @@ -382,10 +738,12 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea }{ { name: "repeated adds", - getter: map[string]mockGetterResults{ - "1": { - added: []string{"a1", "a2", "a3", "a1"}, - newDelta: newDelta, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": { + added: []string{"a1", "a2", "a3", "a1"}, + newDelta: newDelta, + }, }, }, expectAdded: map[string]struct{}{ @@ -397,10 +755,12 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea }, { name: "repeated removes", - getter: map[string]mockGetterResults{ - "1": { - removed: []string{"r1", "r2", "r3", "r1"}, - newDelta: newDelta, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": { + removed: []string{"r1", "r2", "r3", "r1"}, + newDelta: newDelta, + }, }, }, expectAdded: map[string]struct{}{}, @@ -412,11 +772,13 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea }, { name: "remove for same item wins", - getter: map[string]mockGetterResults{ - "1": { - added: []string{"i1", "a2", "a3"}, - removed: []string{"i1", "r2", "r3"}, - newDelta: newDelta, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": { + added: []string{"i1", "a2", "a3"}, + removed: []string{"i1", "r2", "r3"}, + newDelta: newDelta, + }, }, }, expectAdded: map[string]struct{}{ @@ -437,12 +799,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea ctx, flush := tester.NewContext() defer flush() - ss := selectors.Selector{}.SetDiscreteOwnerIDName("user_id", "user_id") - var ( qp = graph.QueryParams{ Category: path.EmailCategory, // doesn't matter which one we use. 
- ResourceOwner: ss, + ResourceOwner: inMock.NewProvider("user_id", "user_name"), Credentials: suite.creds, } statusUpdater = func(*support.ConnectorOperationStatus) {} @@ -458,15 +818,12 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea ) require.Equal(t, "user_id", qp.ResourceOwner.ID(), qp.ResourceOwner) - require.Equal(t, "user_id", qp.ResourceOwner.Name(), qp.ResourceOwner) + require.Equal(t, "user_name", qp.ResourceOwner.Name(), qp.ResourceOwner) - collections := map[string]data.BackupCollection{} - - err := filterContainersAndFillCollections( + collections, err := filterContainersAndFillCollections( ctx, qp, test.getter, - collections, statusUpdater, resolver, allScope, @@ -503,7 +860,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea assert.Zero(t, doNotMerges, "doNotMerge collections") // items in collections assertions - for k := range test.getter { + for k := range test.getter.results { coll := collections[k] if !assert.NotNilf(t, coll, "missing collection for path %s", k) { continue @@ -519,16 +876,14 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea } } -func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incrementals() { - ss := selectors.Selector{}.SetDiscreteOwnerIDName("user_id", "user_id") - +func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incrementals_nondelta() { var ( userID = "user_id" tenantID = suite.creds.AzureTenantID cat = path.EmailCategory // doesn't matter which one we use, qp = graph.QueryParams{ Category: cat, - ResourceOwner: ss, + ResourceOwner: inMock.NewProvider("user_id", "user_name"), Credentials: suite.creds, } statusUpdater = func(*support.ConnectorOperationStatus) {} @@ -559,16 +914,19 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre } table := []struct { - name string - getter mockGetter - resolver graph.ContainerResolver - dps DeltaPaths - expect map[string]endState + name string + getter mockGetter + resolver graph.ContainerResolver + dps DeltaPaths + expect map[string]endState + skipWhenForcedNoDelta bool }{ { name: "new container", - getter: map[string]mockGetterResults{ - "1": commonResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResults, + }, }, resolver: newMockResolver(mockContainer{ id: strPtr("1"), @@ -583,8 +941,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, { name: "not moved container", - getter: map[string]mockGetterResults{ - "1": commonResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResults, + }, }, resolver: newMockResolver(mockContainer{ id: strPtr("1"), @@ -594,8 +954,8 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }), dps: DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "1", "not_moved").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "1", "not_moved").String(), }, }, expect: map[string]endState{ @@ -604,8 +964,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, { name: "moved container", - getter: map[string]mockGetterResults{ - "1": commonResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResults, + }, }, resolver: newMockResolver(mockContainer{ id: strPtr("1"), @@ -615,8 +977,8 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }), dps: 
DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "1", "prev").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "1", "prev").String(), }, }, expect: map[string]endState{ @@ -624,13 +986,15 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, }, { - name: "deleted container", - getter: map[string]mockGetterResults{}, + name: "deleted container", + getter: mockGetter{ + results: map[string]mockGetterResults{}, + }, resolver: newMockResolver(), dps: DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "1", "deleted").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "1", "deleted").String(), }, }, expect: map[string]endState{ @@ -639,8 +1003,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, { name: "one deleted, one new", - getter: map[string]mockGetterResults{ - "2": commonResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "2": commonResults, + }, }, resolver: newMockResolver(mockContainer{ id: strPtr("2"), @@ -650,8 +1016,8 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }), dps: DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "1", "deleted").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "1", "deleted").String(), }, }, expect: map[string]endState{ @@ -661,8 +1027,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, { name: "one deleted, one new, same path", - getter: map[string]mockGetterResults{ - "2": commonResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "2": commonResults, + }, }, resolver: newMockResolver(mockContainer{ id: strPtr("2"), @@ -672,8 +1040,8 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }), dps: DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "1", "same").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "1", "same").String(), }, }, expect: map[string]endState{ @@ -683,9 +1051,11 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, { name: "one moved, one new, same path", - getter: map[string]mockGetterResults{ - "1": commonResults, - "2": commonResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResults, + "2": commonResults, + }, }, resolver: newMockResolver( mockContainer{ @@ -703,8 +1073,8 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre ), dps: DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "1", "prev").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "1", "prev").String(), }, }, expect: map[string]endState{ @@ -714,8 +1084,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, { name: "bad previous path strings", - getter: map[string]mockGetterResults{ - "1": commonResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResults, + }, }, resolver: newMockResolver(mockContainer{ id: strPtr("1"), @@ -725,12 +1097,12 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }), dps: DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: "1/fnords/mc/smarfs", + Delta: "old_delta_url", + Path: "1/fnords/mc/smarfs", }, "2": DeltaPath{ - delta: "old_delta_url", - path: "2/fnords/mc/smarfs", + Delta: "old_delta_url", 
+ Path: "2/fnords/mc/smarfs", }, }, expect: map[string]endState{ @@ -739,8 +1111,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }, { name: "delta expiration", - getter: map[string]mockGetterResults{ - "1": expiredResults, + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": expiredResults, + }, }, resolver: newMockResolver(mockContainer{ id: strPtr("1"), @@ -750,22 +1124,25 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre }), dps: DeltaPaths{ "1": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "1", "same").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "1", "same").String(), }, }, expect: map[string]endState{ "1": {data.NotMovedState, true}, }, + skipWhenForcedNoDelta: true, // this is not a valid test for non-delta }, { name: "a little bit of everything", - getter: map[string]mockGetterResults{ - "1": commonResults, // new - "2": commonResults, // notMoved - "3": commonResults, // moved - "4": expiredResults, // moved - // "5" gets deleted + getter: mockGetter{ + results: map[string]mockGetterResults{ + "1": commonResults, // new + "2": commonResults, // notMoved + "3": commonResults, // moved + "4": expiredResults, // moved + // "5" gets deleted + }, }, resolver: newMockResolver( mockContainer{ @@ -795,20 +1172,20 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre ), dps: DeltaPaths{ "2": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "2", "not_moved").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "2", "not_moved").String(), }, "3": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "3", "prev").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "3", "prev").String(), }, "4": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "4", "prev").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "4", "prev").String(), }, "5": DeltaPath{ - delta: "old_delta_url", - path: prevPath(suite.T(), "5", "deleted").String(), + Delta: "old_delta_url", + Path: prevPath(suite.T(), "5", "deleted").String(), }, }, expect: map[string]endState{ @@ -818,54 +1195,83 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre "4": {data.MovedState, true}, "5": {data.DeletedState, false}, }, + skipWhenForcedNoDelta: true, }, } for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() + for _, deltaBefore := range []bool{true, false} { + for _, deltaAfter := range []bool{true, false} { + name := test.name - ctx, flush := tester.NewContext() - defer flush() - - collections := map[string]data.BackupCollection{} - - err := filterContainersAndFillCollections( - ctx, - qp, - test.getter, - collections, - statusUpdater, - test.resolver, - allScope, - test.dps, - control.Options{}, - fault.New(true)) - assert.NoError(t, err, clues.ToCore(err)) - - metadatas := 0 - for _, c := range collections { - p := c.FullPath() - if p == nil { - p = c.PreviousPath() + if deltaAfter { + name += "-delta" + } else { + if test.skipWhenForcedNoDelta { + suite.T().Skip("intentionally skipped non-delta case") + } + name += "-non-delta" } - require.NotNil(t, p) + suite.Run(name, func() { + t := suite.T() - if p.Service() == path.ExchangeMetadataService { - metadatas++ - continue - } + ctx, flush := tester.NewContext() + defer flush() - p0 := p.Folders()[0] + ctrlOpts := control.Defaults() + ctrlOpts.ToggleFeatures.DisableDelta = !deltaAfter - expect, ok 
:= test.expect[p0] - assert.True(t, ok, "collection is expected in result") + getter := test.getter + if !deltaAfter { + getter.noReturnDelta = false + } - assert.Equalf(t, expect.state, c.State(), "collection %s state", p0) - assert.Equalf(t, expect.doNotMerge, c.DoNotMergeItems(), "collection %s DoNotMergeItems", p0) + dps := test.dps + if !deltaBefore { + for k, dp := range dps { + dp.Delta = "" + dps[k] = dp + } + } + + collections, err := filterContainersAndFillCollections( + ctx, + qp, + test.getter, + statusUpdater, + test.resolver, + allScope, + test.dps, + ctrlOpts, + fault.New(true)) + assert.NoError(t, err, clues.ToCore(err)) + + metadatas := 0 + for _, c := range collections { + p := c.FullPath() + if p == nil { + p = c.PreviousPath() + } + + require.NotNil(t, p) + + if p.Service() == path.ExchangeMetadataService { + metadatas++ + continue + } + + p0 := p.Folders()[0] + + expect, ok := test.expect[p0] + assert.True(t, ok, "collection is expected in result") + + assert.Equalf(t, expect.state, c.State(), "collection %s state", p0) + assert.Equalf(t, expect.doNotMerge, c.DoNotMergeItems(), "collection %s DoNotMergeItems", p0) + } + + assert.Equal(t, 1, metadatas, "metadata collections") + }) } - - assert.Equal(t, 1, metadatas, "metadata collections") - }) + } } } diff --git a/src/internal/connector/exchange/service_restore.go b/src/internal/connector/exchange/service_restore.go index c9178e947..4ddf369c2 100644 --- a/src/internal/connector/exchange/service_restore.go +++ b/src/internal/connector/exchange/service_restore.go @@ -4,15 +4,13 @@ import ( "bytes" "context" "fmt" - "reflect" "runtime/trace" "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -24,6 +22,7 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // RestoreExchangeObject directs restore pipeline towards restore function @@ -74,7 +73,13 @@ func RestoreExchangeContact( ctx = clues.Add(ctx, "item_id", ptr.Val(contact.GetId())) - response, err := service.Client().UsersById(user).ContactFoldersById(destination).Contacts().Post(ctx, contact, nil) + response, err := service.Client(). + Users(). + ByUserId(user). + ContactFolders(). + ByContactFolderId(destination). + Contacts(). + Post(ctx, contact, nil) if err != nil { return nil, graph.Wrap(ctx, err, "uploading Contact") } @@ -122,7 +127,13 @@ func RestoreExchangeEvent( transformedEvent.SetAttachments([]models.Attachmentable{}) } - response, err := service.Client().UsersById(user).CalendarsById(destination).Events().Post(ctx, transformedEvent, nil) + response, err := service.Client(). + Users(). + ByUserId(user). + Calendars(). + ByCalendarId(destination). + Events(). 
+ Post(ctx, transformedEvent, nil) if err != nil { return nil, graph.Wrap(ctx, err, "uploading event") } @@ -194,7 +205,7 @@ func RestoreMailMessage( if clone.GetSentDateTime() != nil { sv2 := models.NewSingleValueLegacyExtendedProperty() - sendPropertyValue := common.FormatLegacyTime(ptr.Val(clone.GetSentDateTime())) + sendPropertyValue := dttm.FormatToLegacy(ptr.Val(clone.GetSentDateTime())) sendPropertyTag := MailSendDateTimeOverrideProperty sv2.SetId(&sendPropertyTag) sv2.SetValue(&sendPropertyValue) @@ -204,7 +215,7 @@ func RestoreMailMessage( if clone.GetReceivedDateTime() != nil { sv3 := models.NewSingleValueLegacyExtendedProperty() - recvPropertyValue := common.FormatLegacyTime(ptr.Val(clone.GetReceivedDateTime())) + recvPropertyValue := dttm.FormatToLegacy(ptr.Val(clone.GetReceivedDateTime())) recvPropertyTag := MailReceiveDateTimeOverriveProperty sv3.SetId(&recvPropertyTag) sv3.SetValue(&recvPropertyValue) @@ -218,16 +229,24 @@ func RestoreMailMessage( return nil, err } - info := api.MailInfo(clone) - info.Size = int64(len(bits)) + info := api.MailInfo(clone, int64(len(bits))) return info, nil } -// attachmentBytes is a helper to retrieve the attachment content from a models.Attachmentable -// TODO: Revisit how we retrieve/persist attachment content during backup so this is not needed -func attachmentBytes(attachment models.Attachmentable) []byte { - return reflect.Indirect(reflect.ValueOf(attachment)).FieldByName("contentBytes").Bytes() +// GetAttachmentBytes is a helper to retrieve the attachment content from a models.Attachmentable +func GetAttachmentBytes(attachment models.Attachmentable) ([]byte, error) { + bi, err := attachment.GetBackingStore().Get("contentBytes") + if err != nil { + return nil, err + } + + bts, ok := bi.([]byte) + if !ok { + return nil, clues.New(fmt.Sprintf("unexpected type for attachment content: %T", bi)) + } + + return bts, nil } // SendMailToBackStore function for transporting in-memory messageable item to M365 backstore @@ -246,7 +265,13 @@ func SendMailToBackStore( // Item.Attachments --> HasAttachments doesn't always have a value populated when deserialized message.SetAttachments([]models.Attachmentable{}) - response, err := service.Client().UsersById(user).MailFoldersById(destination).Messages().Post(ctx, message, nil) + response, err := service.Client(). + Users(). + ByUserId(user). + MailFolders(). + ByMailFolderId(destination). + Messages(). + Post(ctx, message, nil) if err != nil { return graph.Wrap(ctx, err, "restoring mail") } @@ -436,16 +461,13 @@ func restoreCollection( metrics.Bytes += int64(len(byteArray)) metrics.Successes++ - itemPath, err := dc.FullPath().Append(itemData.UUID(), true) + itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) if err != nil { errs.AddRecoverable(clues.Wrap(err, "building full path with item").WithClues(ctx)) continue } - locationRef := &path.Builder{} - if category == path.ContactsCategory { - locationRef = locationRef.Append(itemPath.Folders()...) - } + locationRef := path.Builder{}.Append(itemPath.Folders()...) err = deets.Add( itemPath, @@ -689,10 +711,20 @@ func establishEventsRestoreLocation( ctx = clues.Add(ctx, "is_new_cache", isNewCache) temp, err := ac.Events().CreateCalendar(ctx, user, folders[0]) - if err != nil { + if err != nil && !graph.IsErrFolderExists(err) { return "", err } + // 409 handling: Fetch folder if it exists and add to cache. + // This is rare, but may happen if CreateCalendar() POST fails with 5xx, + // potentially leaving dirty state in graph. 
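	// Illustrative sequence (an assumption spelled out for clarity, not new
	// behavior): CreateCalendar POSTs the calendar; if graph answers 409 with
	// code ErrorFolderExists, the branch below falls back to GetContainerByName
	// and reuses the existing calendar's ID rather than failing the restore.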
+ if graph.IsErrFolderExists(err) { + temp, err = ac.Events().GetContainerByName(ctx, user, folders[0]) + if err != nil { + return "", err + } + } + folderID := ptr.Val(temp.GetId()) if isNewCache { diff --git a/src/internal/connector/graph/api/api.go b/src/internal/connector/graph/api/api.go index 0870f9ea0..3db26bf85 100644 --- a/src/internal/connector/graph/api/api.go +++ b/src/internal/connector/graph/api/api.go @@ -27,3 +27,25 @@ func NextLink(pl PageLinker) string { func NextAndDeltaLink(pl DeltaPageLinker) (string, string) { return NextLink(pl), ptr.Val(pl.GetOdataDeltaLink()) } + +type Valuer[T any] interface { + GetValue() []T +} + +type PageLinkValuer[T any] interface { + PageLinker + Valuer[T] +} + +// EmptyDeltaLinker is used to convert PageLinker to DeltaPageLinker +type EmptyDeltaLinker[T any] struct { + PageLinkValuer[T] +} + +func (EmptyDeltaLinker[T]) GetOdataDeltaLink() *string { + return ptr.To("") +} + +func (e EmptyDeltaLinker[T]) GetValue() []T { + return e.PageLinkValuer.GetValue() +} diff --git a/src/internal/connector/graph/collections.go b/src/internal/connector/graph/collections.go index b57ca5b38..ee941f81c 100644 --- a/src/internal/connector/graph/collections.go +++ b/src/internal/connector/graph/collections.go @@ -11,14 +11,19 @@ import ( "github.com/alcionai/corso/src/pkg/path" ) -var _ data.BackupCollection = emptyCollection{} +var _ data.BackupCollection = prefixCollection{} -type emptyCollection struct { - p path.Path - su support.StatusUpdater +// TODO: move this out of graph. /data would be a much better owner +// for a generic struct like this. However, support.StatusUpdater makes +// it difficult to extract from this package in a generic way. +type prefixCollection struct { + full path.Path + prev path.Path + su support.StatusUpdater + state data.CollectionState } -func (c emptyCollection) Items(ctx context.Context, _ *fault.Bus) <-chan data.Stream { +func (c prefixCollection) Items(ctx context.Context, _ *fault.Bus) <-chan data.Stream { res := make(chan data.Stream) close(res) @@ -28,26 +33,29 @@ func (c emptyCollection) Items(ctx context.Context, _ *fault.Bus) <-chan data.St return res } -func (c emptyCollection) FullPath() path.Path { - return c.p +func (c prefixCollection) FullPath() path.Path { + return c.full } -func (c emptyCollection) PreviousPath() path.Path { - return c.p +func (c prefixCollection) PreviousPath() path.Path { + return c.prev } -func (c emptyCollection) State() data.CollectionState { - // This assumes we won't change the prefix path. Could probably use MovedState - // as well if we do need to change things around. 
- return data.NotMovedState +func (c prefixCollection) State() data.CollectionState { + return c.state } -func (c emptyCollection) DoNotMergeItems() bool { +func (c prefixCollection) DoNotMergeItems() bool { return false } +// --------------------------------------------------------------------------- +// base collections +// --------------------------------------------------------------------------- + func BaseCollections( ctx context.Context, + colls []data.BackupCollection, tenant, rOwner string, service path.ServiceType, categories map[path.CategoryType]struct{}, @@ -55,15 +63,23 @@ func BaseCollections( errs *fault.Bus, ) ([]data.BackupCollection, error) { var ( - res = []data.BackupCollection{} - el = errs.Local() - lastErr error + res = []data.BackupCollection{} + el = errs.Local() + lastErr error + collKeys = map[string]struct{}{} ) + // won't catch deleted collections, since they have no FullPath + for _, c := range colls { + if c.FullPath() != nil { + collKeys[c.FullPath().String()] = struct{}{} + } + } + for cat := range categories { ictx := clues.Add(ctx, "base_service", service, "base_category", cat) - p, err := path.Build(tenant, rOwner, service, cat, false, "tmp") + full, err := path.ServicePrefix(tenant, rOwner, service, cat) if err != nil { // Shouldn't happen. err = clues.Wrap(err, "making path").WithClues(ictx) @@ -73,19 +89,63 @@ func BaseCollections( continue } - // Pop off the last path element because we just want the prefix. - p, err = p.Dir() - if err != nil { - // Shouldn't happen. - err = clues.Wrap(err, "getting base prefix").WithClues(ictx) - el.AddRecoverable(err) - lastErr = err - - continue + // only add this collection if it doesn't already exist in the set. + if _, ok := collKeys[full.String()]; !ok { + res = append(res, &prefixCollection{ + prev: full, + full: full, + su: su, + state: data.StateOf(full, full), + }) } - - res = append(res, emptyCollection{p: p, su: su}) } return res, lastErr } + +// --------------------------------------------------------------------------- +// prefix migration +// --------------------------------------------------------------------------- + +// Creates a new collection that only handles prefix pathing. 
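// An illustrative call site (hypothetical; the tenant/owner values are
// assumptions), mirroring the unit tests that follow:
//
//	prev, _ := path.ServicePrefix("tenant", "owner-old", path.ExchangeService, path.EmailCategory)
//	full, _ := path.ServicePrefix("tenant", "owner-new", path.ExchangeService, path.EmailCategory)
//	pc, err := NewPrefixCollection(prev, full, statusUpdater)
//
// Both paths must be bare service prefixes (no folders, no item), and a nil
// prev or full (a new or deleted prefix) is rejected below.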
+func NewPrefixCollection( + prev, full path.Path, + su support.StatusUpdater, +) (*prefixCollection, error) { + if prev != nil { + if len(prev.Item()) > 0 { + return nil, clues.New("prefix collection previous path contains an item") + } + + if len(prev.Folders()) > 0 { + return nil, clues.New("prefix collection previous path contains folders") + } + } + + if full != nil { + if len(full.Item()) > 0 { + return nil, clues.New("prefix collection full path contains an item") + } + + if len(full.Folders()) > 0 { + return nil, clues.New("prefix collection full path contains folders") + } + } + + pc := &prefixCollection{ + prev: prev, + full: full, + su: su, + state: data.StateOf(prev, full), + } + + if pc.state == data.DeletedState { + return nil, clues.New("collection attempted to delete prefix") + } + + if pc.state == data.NewState { + return nil, clues.New("collection attempted to create a new prefix") + } + + return pc, nil +} diff --git a/src/internal/connector/graph/collections_test.go b/src/internal/connector/graph/collections_test.go new file mode 100644 index 000000000..a01064bae --- /dev/null +++ b/src/internal/connector/graph/collections_test.go @@ -0,0 +1,100 @@ +package graph + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/path" +) + +type CollectionsUnitSuite struct { + tester.Suite +} + +func TestCollectionsUnitSuite(t *testing.T) { + suite.Run(t, &CollectionsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *CollectionsUnitSuite) TestNewPrefixCollection() { + t := suite.T() + serv := path.OneDriveService + cat := path.FilesCategory + + p1, err := path.ServicePrefix("t", "ro1", serv, cat) + require.NoError(t, err, clues.ToCore(err)) + + p2, err := path.ServicePrefix("t", "ro2", serv, cat) + require.NoError(t, err, clues.ToCore(err)) + + items, err := path.Build("t", "ro", serv, cat, true, "fld", "itm") + require.NoError(t, err, clues.ToCore(err)) + + folders, err := path.Build("t", "ro", serv, cat, false, "fld") + require.NoError(t, err, clues.ToCore(err)) + + table := []struct { + name string + prev path.Path + full path.Path + expectErr require.ErrorAssertionFunc + }{ + { + name: "not moved", + prev: p1, + full: p1, + expectErr: require.NoError, + }, + { + name: "moved", + prev: p1, + full: p2, + expectErr: require.NoError, + }, + { + name: "deleted", + prev: p1, + full: nil, + expectErr: require.Error, + }, + { + name: "new", + prev: nil, + full: p2, + expectErr: require.Error, + }, + { + name: "prev has items", + prev: items, + full: p1, + expectErr: require.Error, + }, + { + name: "prev has folders", + prev: folders, + full: p1, + expectErr: require.Error, + }, + { + name: "full has items", + prev: p1, + full: items, + expectErr: require.Error, + }, + { + name: "full has folders", + prev: p1, + full: folders, + expectErr: require.Error, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + _, err := NewPrefixCollection(test.prev, test.full, nil) + test.expectErr(suite.T(), err, clues.ToCore(err)) + }) + } +} diff --git a/src/internal/connector/graph/concurrency_middleware.go b/src/internal/connector/graph/concurrency_middleware.go new file mode 100644 index 000000000..2756a60c6 --- /dev/null +++ b/src/internal/connector/graph/concurrency_middleware.go @@ -0,0 +1,202 @@ +package graph + +import ( + "context" + "net/http" + "sync" + + "github.com/alcionai/clues" + khttp 
"github.com/microsoft/kiota-http-go" + "golang.org/x/time/rate" + + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" +) + +// --------------------------------------------------------------------------- +// Concurrency Limiter +// "how many calls at one time" +// --------------------------------------------------------------------------- + +// concurrencyLimiter middleware limits the number of concurrent requests to graph API +type concurrencyLimiter struct { + semaphore chan struct{} +} + +var ( + once sync.Once + concurrencyLim *concurrencyLimiter + maxConcurrentRequests = 4 +) + +func generateConcurrencyLimiter(capacity int) *concurrencyLimiter { + if capacity < 1 || capacity > maxConcurrentRequests { + capacity = maxConcurrentRequests + } + + return &concurrencyLimiter{ + semaphore: make(chan struct{}, capacity), + } +} + +func InitializeConcurrencyLimiter(capacity int) { + once.Do(func() { + concurrencyLim = generateConcurrencyLimiter(capacity) + }) +} + +func (cl *concurrencyLimiter) Intercept( + pipeline khttp.Pipeline, + middlewareIndex int, + req *http.Request, +) (*http.Response, error) { + if cl == nil || cl.semaphore == nil { + return nil, clues.New("nil concurrency limiter") + } + + cl.semaphore <- struct{}{} + defer func() { + <-cl.semaphore + }() + + return pipeline.Next(req, middlewareIndex) +} + +//nolint:lll +// --------------------------------------------------------------------------- +// Rate Limiter +// "how many calls in a minute" +// https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online +// --------------------------------------------------------------------------- + +const ( + // Default goal is to keep calls below the 10k-per-10-minute threshold. + // 14 tokens every second nets 840 per minute. That's 8400 every 10 minutes, + // which is a bit below the mark. + // But suppose we have a minute-long dry spell followed by a 10 minute tsunami. + // We'll have built up 750 tokens in reserve, so the first 750 calls go through + // immediately. Over the next 10 minutes, we'll partition out the other calls + // at a rate of 840-per-minute, ending at a total of 9150. Theoretically, if + // the volume keeps up after that, we'll always stay between 8400 and 9150 out + // of 10k. Worst case scenario, we have an extra minute of padding to allow + // up to 9990. + defaultPerSecond = 14 // 14 * 60 = 840 + defaultMaxCap = 750 // real cap is 10k-per-10-minutes + // since drive runs on a per-minute, rather than per-10-minute bucket, we have + // to keep the max cap equal to the per-second cap. A large maxCap pool (say, + // 1200, similar to the per-minute cap) would allow us to make a flood of 2400 + // calls in the first minute, putting us over the per-minute limit. Keeping + // the cap at the per-second burst means we only dole out a max of 1240 in one + // minute (20 cap + 1200 per minute + one burst of padding). 
+ drivePerSecond = 20 // 20 * 60 = 1200 + driveMaxCap = 20 // real cap is 1250-per-minute +) + +var ( + driveLimiter = rate.NewLimiter(drivePerSecond, driveMaxCap) + // also used as the exchange service limiter + defaultLimiter = rate.NewLimiter(defaultPerSecond, defaultMaxCap) +) + +type LimiterCfg struct { + Service path.ServiceType +} + +type limiterCfgKey string + +const limiterCfgCtxKey limiterCfgKey = "corsoGaphRateLimiterCfg" + +func BindRateLimiterConfig(ctx context.Context, lc LimiterCfg) context.Context { + return context.WithValue(ctx, limiterCfgCtxKey, lc) +} + +func ctxLimiter(ctx context.Context) *rate.Limiter { + lc, ok := extractRateLimiterConfig(ctx) + if !ok { + return defaultLimiter + } + + switch lc.Service { + case path.OneDriveService, path.SharePointService: + return driveLimiter + default: + return defaultLimiter + } +} + +func extractRateLimiterConfig(ctx context.Context) (LimiterCfg, bool) { + l := ctx.Value(limiterCfgCtxKey) + if l == nil { + return LimiterCfg{}, false + } + + lc, ok := l.(LimiterCfg) + + return lc, ok +} + +type limiterConsumptionKey string + +const limiterConsumptionCtxKey limiterConsumptionKey = "corsoGraphRateLimiterConsumption" + +const ( + defaultLC = 1 + driveDefaultLC = 2 + // limit consumption rate for single-item GETs requests, + // or delta-based multi-item GETs. + SingleGetOrDeltaLC = 1 + // limit consumption rate for anything permissions related + PermissionsLC = 5 +) + +// ConsumeNTokens ensures any calls using this context will consume +// n rate-limiter tokens. Default is 1, and this value does not need +// to be established in the context to consume the default tokens. +// This should only get used on a per-call basis, to avoid cross-pollination. +func ConsumeNTokens(ctx context.Context, n int) context.Context { + return context.WithValue(ctx, limiterConsumptionCtxKey, n) +} + +func ctxLimiterConsumption(ctx context.Context, defaultConsumption int) int { + l := ctx.Value(limiterConsumptionCtxKey) + if l == nil { + return defaultConsumption + } + + lc, ok := l.(int) + if !ok || lc < 1 { + return defaultConsumption + } + + return lc +} + +// QueueRequest will allow the request to occur immediately if we're under the +// calls-per-minute rate. Otherwise, the call will wait in a queue until +// the next token set is available. +func QueueRequest(ctx context.Context) { + limiter := ctxLimiter(ctx) + defaultConsumed := defaultLC + + if limiter == driveLimiter { + defaultConsumed = driveDefaultLC + } + + consume := ctxLimiterConsumption(ctx, defaultConsumed) + + if err := limiter.WaitN(ctx, consume); err != nil { + logger.CtxErr(ctx, err).Error("graph middleware waiting on the limiter") + } +} + +// RateLimiterMiddleware is used to ensure we don't overstep per-min request limits. 
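// A hypothetical call-site sketch (the helpers exist above; the pairing is an
// illustration): a permissions-heavy drive request can reserve extra tokens
// before it is sent:
//
//	ctx = BindRateLimiterConfig(ctx, LimiterCfg{Service: path.OneDriveService})
//	ctx = ConsumeNTokens(ctx, PermissionsLC)
//
// QueueRequest, invoked from Intercept below, then waits for PermissionsLC (5)
// tokens on the drive limiter instead of the drive default of 2.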
+type RateLimiterMiddleware struct{} + +func (mw *RateLimiterMiddleware) Intercept( + pipeline khttp.Pipeline, + middlewareIndex int, + req *http.Request, +) (*http.Response, error) { + QueueRequest(req.Context()) + return pipeline.Next(req, middlewareIndex) +} diff --git a/src/internal/connector/graph/concurrency_middleware_test.go b/src/internal/connector/graph/concurrency_middleware_test.go new file mode 100644 index 000000000..c5734a665 --- /dev/null +++ b/src/internal/connector/graph/concurrency_middleware_test.go @@ -0,0 +1,120 @@ +package graph + +import ( + "math/rand" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + khttp "github.com/microsoft/kiota-http-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" +) + +type ConcurrencyLimiterUnitTestSuite struct { + tester.Suite +} + +func TestConcurrencyLimiterSuite(t *testing.T) { + suite.Run(t, &ConcurrencyLimiterUnitTestSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *ConcurrencyLimiterUnitTestSuite) TestConcurrencyLimiter() { + t := suite.T() + + maxConcurrentRequests := 4 + cl := generateConcurrencyLimiter(maxConcurrentRequests) + client := khttp.GetDefaultClient(cl) + + // Server side handler to simulate 429s + sem := make(chan struct{}, maxConcurrentRequests) + reqHandler := func(w http.ResponseWriter, r *http.Request) { + select { + case sem <- struct{}{}: + defer func() { + <-sem + }() + + time.Sleep(time.Duration(rand.Intn(50)+50) * time.Millisecond) + w.WriteHeader(http.StatusOK) + + return + default: + w.WriteHeader(http.StatusTooManyRequests) + return + } + } + + ts := httptest.NewServer(http.HandlerFunc(reqHandler)) + defer ts.Close() + + var wg sync.WaitGroup + for i := 0; i < 20; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + resp, err := client.Get(ts.URL) + require.NoError(t, err) + + defer resp.Body.Close() + + assert.Equal(t, http.StatusOK, resp.StatusCode) + }() + } + wg.Wait() +} + +func (suite *ConcurrencyLimiterUnitTestSuite) TestInitializeConcurrencyLimiter() { + t := suite.T() + + InitializeConcurrencyLimiter(2) + InitializeConcurrencyLimiter(4) + + assert.Equal(t, cap(concurrencyLim.semaphore), 2, "singleton semaphore capacity changed") +} + +func (suite *ConcurrencyLimiterUnitTestSuite) TestGenerateConcurrencyLimiter() { + tests := []struct { + name string + cap int + expectedCap int + }{ + { + name: "valid capacity", + cap: 2, + expectedCap: 2, + }, + { + name: "zero capacity", + cap: 0, + expectedCap: maxConcurrentRequests, + }, + { + name: "negative capacity", + cap: -1, + expectedCap: maxConcurrentRequests, + }, + { + name: "out of bounds capacity", + cap: 10, + expectedCap: maxConcurrentRequests, + }, + } + + for _, test := range tests { + suite.Run(test.name, func() { + t := suite.T() + + actual := generateConcurrencyLimiter(test.cap) + assert.Equal(t, cap(actual.semaphore), test.expectedCap, + "retrieved semaphore capacity vs expected capacity") + }) + } +} diff --git a/src/internal/connector/graph/consts.go b/src/internal/connector/graph/consts.go index 14dac934f..32a549e8c 100644 --- a/src/internal/connector/graph/consts.go +++ b/src/internal/connector/graph/consts.go @@ -1,5 +1,12 @@ package graph +import ( + "context" + + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" +) + // --------------------------------------------------------------------------- // item response AdditionalData // 
--------------------------------------------------------------------------- @@ -25,3 +32,84 @@ const ( // given endpoint. PreviousPathFileName = "previouspath" ) + +// --------------------------------------------------------------------------- +// Runtime Configuration +// --------------------------------------------------------------------------- + +type parallelism struct { + // sets the collection buffer size before blocking. + collectionBuffer int + // sets the parallelism of item population within a collection. + item int +} + +func (p parallelism) CollectionBufferSize() int { + if p.collectionBuffer == 0 { + return 1 + } + + return p.collectionBuffer +} + +func (p parallelism) CollectionBufferOverride(ctx context.Context, override int) int { + logger.Ctx(ctx).Infow( + "collection buffer parallelism", + "default_parallelism", p.collectionBuffer, + "requested_parallelism", override) + + if !isWithin(1, p.collectionBuffer, override) { + return p.collectionBuffer + } + + return override +} + +func (p parallelism) ItemOverride(ctx context.Context, override int) int { + logger.Ctx(ctx).Infow( + "item-level parallelism", + "default_parallelism", p.item, + "requested_parallelism", override) + + if !isWithin(1, p.item, override) { + return p.item + } + + return override +} + +func (p parallelism) Item() int { + if p.item == 0 { + return 1 + } + + return p.item +} + +// returns low <= v <= high +// if high < low, returns low <= v +func isWithin(low, high, v int) bool { + return v >= low && (high < low || v <= high) +} + +var sp = map[path.ServiceType]parallelism{ + path.ExchangeService: { + collectionBuffer: 4, + item: 4, + }, + path.OneDriveService: { + collectionBuffer: 5, + item: 4, + }, + // sharepoint libraries are considered "onedrive" parallelism. + // this only controls lists/pages. + path.SharePointService: { + collectionBuffer: 5, + item: 4, + }, +} + +// Parallelism returns the parallelism settings for the requested service. 
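// A usage sketch (hypothetical caller; the channel names are assumptions):
//
//	par := Parallelism(path.ExchangeService)
//	collCh := make(chan data.BackupCollection, par.CollectionBufferSize())
//	sem := make(chan struct{}, par.Item())
//
// The zero-value guards above keep both knobs at a minimum of 1, even for a
// service with no entry in sp.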
+func Parallelism(srv path.ServiceType) parallelism { + return sp[srv] +} diff --git a/src/internal/connector/graph/consts_test.go b/src/internal/connector/graph/consts_test.go new file mode 100644 index 000000000..84f8b694e --- /dev/null +++ b/src/internal/connector/graph/consts_test.go @@ -0,0 +1,40 @@ +package graph + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" +) + +type ConstsUnitSuite struct { + tester.Suite +} + +func TestConstsUnitSuite(t *testing.T) { + suite.Run(t, &ConstsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *ConstsUnitSuite) TestIsWithin() { + table := []struct { + name string + low, high, v int + expect assert.BoolAssertionFunc + }{ + {"1 < 3 < 5", 1, 5, 3, assert.True}, + {"1 < 3, no high", 1, 0, 3, assert.True}, + {"1 <= 1 <= 1", 1, 1, 1, assert.True}, + {"1 <= 1 <= 5", 1, 5, 1, assert.True}, + {"1 <= 5 <= 5", 1, 5, 5, assert.True}, + {"1 <= 0 <= 2", 1, 1, 0, assert.False}, + {"1 <= 3 <= 2", 1, 1, 3, assert.False}, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + test.expect(t, isWithin(test.low, test.high, test.v)) + }) + } +} diff --git a/src/internal/connector/graph/errors.go b/src/internal/connector/graph/errors.go index f3f47da4b..81886965e 100644 --- a/src/internal/connector/graph/errors.go +++ b/src/internal/connector/graph/errors.go @@ -14,10 +14,10 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" "github.com/pkg/errors" - "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/logger" ) @@ -25,19 +25,33 @@ import ( // Error Interpretation Helpers // --------------------------------------------------------------------------- +type errorCode string + const ( - errCodeActivityLimitReached = "activityLimitReached" - errCodeItemNotFound = "ErrorItemNotFound" - errCodeItemNotFoundShort = "itemNotFound" - errCodeEmailFolderNotFound = "ErrorSyncFolderNotFound" - errCodeResyncRequired = "ResyncRequired" // alt: resyncRequired - errCodeMalwareDetected = "malwareDetected" - errCodeSyncFolderNotFound = "ErrorSyncFolderNotFound" - errCodeSyncStateNotFound = "SyncStateNotFound" - errCodeSyncStateInvalid = "SyncStateInvalid" - errCodeResourceNotFound = "ResourceNotFound" - errCodeRequestResourceNotFound = "Request_ResourceNotFound" - errCodeMailboxNotEnabledForRESTAPI = "MailboxNotEnabledForRESTAPI" + activityLimitReached errorCode = "activityLimitReached" + emailFolderNotFound errorCode = "ErrorSyncFolderNotFound" + errorAccessDenied errorCode = "ErrorAccessDenied" + itemNotFound errorCode = "ErrorItemNotFound" + itemNotFoundShort errorCode = "itemNotFound" + mailboxNotEnabledForRESTAPI errorCode = "MailboxNotEnabledForRESTAPI" + malwareDetected errorCode = "malwareDetected" + requestResourceNotFound errorCode = "Request_ResourceNotFound" + quotaExceeded errorCode = "ErrorQuotaExceeded" + resourceNotFound errorCode = "ResourceNotFound" + resyncRequired errorCode = "ResyncRequired" // alt: resyncRequired + syncFolderNotFound errorCode = "ErrorSyncFolderNotFound" + syncStateInvalid errorCode = "SyncStateInvalid" + syncStateNotFound errorCode = "SyncStateNotFound" + // This error occurs when an attempt is made to create a folder that has + // the same name as another folder in the same parent. 
Such duplicate folder + // names are not allowed by graph. + folderExists errorCode = "ErrorFolderExists" +) + +type errorMessage string + +const ( + IOErrDuringRead errorMessage = "IO error during request payload read" ) const ( @@ -83,9 +97,9 @@ func IsErrDeletedInFlight(err error) bool { if hasErrorCode( err, - errCodeItemNotFound, - errCodeItemNotFoundShort, - errCodeSyncFolderNotFound, + itemNotFound, + itemNotFoundShort, + syncFolderNotFound, ) { return true } @@ -94,16 +108,28 @@ func IsErrDeletedInFlight(err error) bool { } func IsErrInvalidDelta(err error) bool { - return hasErrorCode(err, errCodeSyncStateNotFound, errCodeResyncRequired, errCodeSyncStateInvalid) || + return hasErrorCode(err, syncStateNotFound, resyncRequired, syncStateInvalid) || errors.Is(err, ErrInvalidDelta) } +func IsErrQuotaExceeded(err error) bool { + return hasErrorCode(err, quotaExceeded) +} + func IsErrExchangeMailFolderNotFound(err error) bool { - return hasErrorCode(err, errCodeResourceNotFound, errCodeMailboxNotEnabledForRESTAPI) + return hasErrorCode(err, resourceNotFound, mailboxNotEnabledForRESTAPI) } func IsErrUserNotFound(err error) bool { - return hasErrorCode(err, errCodeRequestResourceNotFound) + return hasErrorCode(err, requestResourceNotFound) +} + +func IsErrResourceNotFound(err error) bool { + return hasErrorCode(err, resourceNotFound) +} + +func IsErrAccessDenied(err error) bool { + return hasErrorCode(err, errorAccessDenied) || clues.HasLabel(err, LabelStatus(http.StatusForbidden)) } func IsErrTimeout(err error) bool { @@ -138,7 +164,7 @@ func LabelStatus(statusCode int) string { // IsMalware is true if the graphAPI returns a "malware detected" error code. func IsMalware(err error) bool { - return hasErrorCode(err, errCodeMalwareDetected) + return hasErrorCode(err, malwareDetected) } func IsMalwareResp(ctx context.Context, resp *http.Response) bool { @@ -154,18 +180,22 @@ func IsMalwareResp(ctx context.Context, resp *http.Response) bool { return false } - if strings.Contains(string(respDump), errCodeMalwareDetected) { + if strings.Contains(string(respDump), string(malwareDetected)) { return true } return false } +func IsErrFolderExists(err error) bool { + return hasErrorCode(err, folderExists) +} + // --------------------------------------------------------------------------- // error parsers // --------------------------------------------------------------------------- -func hasErrorCode(err error, codes ...string) bool { +func hasErrorCode(err error, codes ...errorCode) bool { if err == nil { return false } @@ -175,16 +205,17 @@ func hasErrorCode(err error, codes ...string) bool { return false } - if oDataError.GetError().GetCode() == nil { + code, ok := ptr.ValOK(oDataError.GetError().GetCode()) + if !ok { return false } - lcodes := []string{} - for _, c := range codes { - lcodes = append(lcodes, strings.ToLower(c)) + cs := make([]string, len(codes)) + for i, c := range codes { + cs[i] = string(c) } - return slices.Contains(lcodes, strings.ToLower(*oDataError.GetError().GetCode())) + return filters.Equal(cs).Compare(code) } // Wrap is a helper function that extracts ODataError metadata from @@ -229,6 +260,29 @@ func Stack(ctx context.Context, e error) *clues.Err { return setLabels(clues.Stack(e).WithClues(ctx).With(data...), innerMsg) } +// stackReq is a helper function that extracts ODataError metadata from +// the error, plus http req/resp data. If the error is not an ODataError +// type, returns the error with only the req/resp values. 
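// For instance (an assumed call site, not part of this change), a helper that
// fails mid-request might surface both sides of the exchange:
//
//	return nil, stackReq(ctx, req, resp, err)
//
// attaching req_method/req_url/req_len and resp_status/resp_len to the clues
// error alongside the usual ODataError details.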
+func stackReq( + ctx context.Context, + req *http.Request, + resp *http.Response, + e error, +) *clues.Err { + if e == nil { + return nil + } + + se := Stack(ctx, e). + WithMap(reqData(req)). + WithMap(respData(resp)) + + return se +} + +// Checks for the following conditions and labels the error accordingly: +// * mysiteNotFound | mysiteURLNotFound +// * malware func setLabels(err *clues.Err, msg string) *clues.Err { if err == nil { return nil @@ -239,6 +293,10 @@ func setLabels(err *clues.Err, msg string) *clues.Err { err = err.Label(LabelsMysiteNotFound) } + if IsMalware(err) { + err = err.Label(LabelsMalware) + } + return err } @@ -271,6 +329,34 @@ func errData(err odataerrors.ODataErrorable) (string, []any, string) { return mainMsg, data, strings.ToLower(msgConcat) } +func reqData(req *http.Request) map[string]any { + if req == nil { + return nil + } + + r := map[string]any{} + r["req_method"] = req.Method + r["req_len"] = req.ContentLength + + if req.URL != nil { + r["req_url"] = LoggableURL(req.URL.String()) + } + + return r +} + +func respData(resp *http.Response) map[string]any { + if resp == nil { + return nil + } + + r := map[string]any{} + r["resp_status"] = resp.Status + r["resp_len"] = resp.ContentLength + + return r +} + func appendIf(a []any, k string, v *string) []any { if v == nil { return a @@ -298,6 +384,15 @@ func ItemInfo(item models.DriveItemable) map[string]any { if parent != nil { m[fault.AddtlContainerID] = ptr.Val(parent.GetId()) m[fault.AddtlContainerName] = ptr.Val(parent.GetName()) + containerPath := "" + + // Remove the "/drives/b!vF-sdsdsds-sdsdsa-sdsd/root:" prefix + splitPath := strings.SplitN(ptr.Val(parent.GetPath()), ":", 2) + if len(splitPath) > 1 { + containerPath = splitPath[1] + } + + m[fault.AddtlContainerPath] = containerPath } malware := item.GetMalware() diff --git a/src/internal/connector/graph/errors_test.go b/src/internal/connector/graph/errors_test.go index 56b2fba1f..a0095dc1e 100644 --- a/src/internal/connector/graph/errors_test.go +++ b/src/internal/connector/graph/errors_test.go @@ -25,10 +25,10 @@ func TestGraphErrorsUnitSuite(t *testing.T) { } func odErr(code string) *odataerrors.ODataError { - odErr := &odataerrors.ODataError{} - merr := odataerrors.MainError{} + odErr := odataerrors.NewODataError() + merr := odataerrors.NewMainError() merr.SetCode(&code) - odErr.SetError(&merr) + odErr.SetError(merr) return odErr } @@ -90,12 +90,12 @@ func (suite *GraphErrorsUnitSuite) TestIsErrDeletedInFlight() { }, { name: "not-found oDataErr", - err: odErr(errCodeItemNotFound), + err: odErr(string(itemNotFound)), expect: assert.True, }, { name: "sync-not-found oDataErr", - err: odErr(errCodeSyncFolderNotFound), + err: odErr(string(syncFolderNotFound)), expect: assert.True, }, } @@ -134,12 +134,12 @@ func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() { }, { name: "resync-required oDataErr", - err: odErr(errCodeResyncRequired), + err: odErr(string(resyncRequired)), expect: assert.True, }, { name: "sync state invalid oDataErr", - err: odErr(errCodeSyncStateInvalid), + err: odErr(string(syncStateInvalid)), expect: assert.True, }, // next two tests are to make sure the checks are case insensitive @@ -161,6 +161,45 @@ func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() { } } +func (suite *GraphErrorsUnitSuite) TestIsErrQuotaExceeded() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + 
expect: assert.False, + }, + { + name: "as", + err: ErrInvalidDelta, + expect: assert.False, + }, + { + name: "non-matching oDataErr", + err: odErr("fnords"), + expect: assert.False, + }, + { + name: "quota-exceeded oDataErr", + err: odErr("ErrorQuotaExceeded"), + expect: assert.True, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + test.expect(suite.T(), IsErrQuotaExceeded(test.err)) + }) + } +} + func (suite *GraphErrorsUnitSuite) TestIsErrUserNotFound() { table := []struct { name string @@ -184,7 +223,7 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUserNotFound() { }, { name: "request resource not found oDataErr", - err: odErr(errCodeRequestResourceNotFound), + err: odErr(string(requestResourceNotFound)), expect: assert.True, }, } @@ -261,38 +300,87 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUnauthorized() { func (suite *GraphErrorsUnitSuite) TestMalwareInfo() { var ( - i = models.DriveItem{} - cb = models.User{} - cbID = "created-by" - lm = models.User{} - lmID = "last-mod-by" - ref = models.ItemReference{} - refCID = "container-id" - refCN = "container-name" - mal = models.Malware{} - malDesc = "malware-description" + i = models.NewDriveItem() + cb = models.NewUser() + cbID = "created-by" + lm = models.NewUser() + lmID = "last-mod-by" + ref = models.NewItemReference() + refCID = "container-id" + refCN = "container-name" + refCP = "/drives/b!vF-sdsdsds-sdsdsa-sdsd/root:/Folder/container-name" + refCPexp = "/Folder/container-name" + mal = models.NewMalware() + malDesc = "malware-description" ) cb.SetId(&cbID) - i.SetCreatedByUser(&cb) + i.SetCreatedByUser(cb) lm.SetId(&lmID) - i.SetLastModifiedByUser(&lm) + i.SetLastModifiedByUser(lm) ref.SetId(&refCID) ref.SetName(&refCN) - i.SetParentReference(&ref) + ref.SetPath(&refCP) + i.SetParentReference(ref) mal.SetDescription(&malDesc) - i.SetMalware(&mal) + i.SetMalware(mal) expect := map[string]any{ fault.AddtlCreatedBy: cbID, fault.AddtlLastModBy: lmID, fault.AddtlContainerID: refCID, fault.AddtlContainerName: refCN, + fault.AddtlContainerPath: refCPexp, fault.AddtlMalwareDesc: malDesc, } - assert.Equal(suite.T(), expect, ItemInfo(&i)) + assert.Equal(suite.T(), expect, ItemInfo(i)) +} + +func (suite *GraphErrorsUnitSuite) TestIsErrFolderExists() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "non-matching oDataErr", + err: odErr("folder doesn't exist"), + expect: assert.False, + }, + { + name: "matching oDataErr", + err: odErr(string(folderExists)), + expect: assert.True, + }, + // next two tests are to make sure the checks are case insensitive + { + name: "oDataErr camelcase", + err: odErr("ErrorFolderExists"), + expect: assert.True, + }, + { + name: "oDataErr lowercase", + err: odErr("errorfolderexists"), + expect: assert.True, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + test.expect(suite.T(), IsErrFolderExists(test.err)) + }) + } } diff --git a/src/internal/connector/graph/http_wrapper.go b/src/internal/connector/graph/http_wrapper.go new file mode 100644 index 000000000..b0bca76e2 --- /dev/null +++ b/src/internal/connector/graph/http_wrapper.go @@ -0,0 +1,159 @@ +package graph + +import ( + "context" + "io" + "net/http" + + "github.com/alcionai/clues" + khttp "github.com/microsoft/kiota-http-go" + + "github.com/alcionai/corso/src/internal/version" +) + +// 
--------------------------------------------------------------------------- +// constructors +// --------------------------------------------------------------------------- + +type Requester interface { + Request( + ctx context.Context, + method, url string, + body io.Reader, + headers map[string]string, + ) (*http.Response, error) +} + +// NewHTTPWrapper produces a http.Client wrapper that ensures +// calls use all the middleware we expect from the graph api client. +// +// Re-use of http clients is critical, or else we leak OS resources +// and consume relatively unbound socket connections. It is important +// to centralize this client to be passed downstream where api calls +// can utilize it on a per-download basis. +func NewHTTPWrapper(opts ...Option) *httpWrapper { + var ( + cc = populateConfig(opts...) + rt = customTransport{ + n: pipeline{ + middlewares: internalMiddleware(cc), + transport: defaultTransport(), + }, + } + redirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + hc = &http.Client{ + CheckRedirect: redirect, + Timeout: defaultHTTPClientTimeout, + Transport: rt, + } + ) + + cc.apply(hc) + + return &httpWrapper{hc} +} + +// NewNoTimeoutHTTPWrapper constructs a http wrapper with no context timeout. +// +// Re-use of http clients is critical, or else we leak OS resources +// and consume relatively unbound socket connections. It is important +// to centralize this client to be passed downstream where api calls +// can utilize it on a per-download basis. +func NewNoTimeoutHTTPWrapper(opts ...Option) *httpWrapper { + opts = append(opts, NoTimeout()) + return NewHTTPWrapper(opts...) +} + +// --------------------------------------------------------------------------- +// requests +// --------------------------------------------------------------------------- + +// Request does the provided request. 
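// Example (illustrative only; the URL is an assumption, any graph endpoint
// would do):
//
//	hw := NewHTTPWrapper()
//	resp, err := hw.Request(ctx, http.MethodGet, "https://graph.microsoft.com/v1.0/me", nil, nil)
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//
// Reusing one wrapper across calls keeps connection pooling effective, per the
// constructor notes above.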
+func (hw httpWrapper) Request( + ctx context.Context, + method, url string, + body io.Reader, + headers map[string]string, +) (*http.Response, error) { + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, clues.Wrap(err, "new http request") + } + + for k, v := range headers { + req.Header.Set(k, v) + } + + //nolint:lll + // Decorate the traffic + // See https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic + req.Header.Set("User-Agent", "ISV|Alcion|Corso/"+version.Version) + + resp, err := hw.client.Do(req) + if err != nil { + return nil, Stack(ctx, err) + } + + return resp, nil +} + +// --------------------------------------------------------------------------- +// constructor internals +// --------------------------------------------------------------------------- + +type ( + httpWrapper struct { + client *http.Client + } + + customTransport struct { + n nexter + } + + pipeline struct { + transport http.RoundTripper + middlewares []khttp.Middleware + } +) + +// RoundTrip kicks off the middleware chain and returns a response +func (ct customTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return ct.n.Next(req, 0) +} + +// Next moves the request object through middlewares in the pipeline +func (pl pipeline) Next(req *http.Request, idx int) (*http.Response, error) { + if idx < len(pl.middlewares) { + return pl.middlewares[idx].Intercept(pl, idx+1, req) + } + + return pl.transport.RoundTrip(req) +} + +func defaultTransport() http.RoundTripper { + defaultTransport := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport.ForceAttemptHTTP2 = true + + return defaultTransport +} + +func internalMiddleware(cc *clientConfig) []khttp.Middleware { + mw := []khttp.Middleware{ + &RetryMiddleware{ + MaxRetries: cc.maxRetries, + Delay: cc.minDelay, + }, + khttp.NewRedirectHandler(), + &LoggingMiddleware{}, + &RateLimiterMiddleware{}, + &MetricsMiddleware{}, + } + + if len(cc.appendMiddleware) > 0 { + mw = append(mw, cc.appendMiddleware...) 
+ } + + return mw +} diff --git a/src/internal/connector/graph/http_wrapper_test.go b/src/internal/connector/graph/http_wrapper_test.go new file mode 100644 index 000000000..92348736c --- /dev/null +++ b/src/internal/connector/graph/http_wrapper_test.go @@ -0,0 +1,120 @@ +package graph + +import ( + "net/http" + "strings" + "testing" + + "github.com/alcionai/clues" + khttp "github.com/microsoft/kiota-http-go" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" +) + +type HTTPWrapperIntgSuite struct { + tester.Suite +} + +func TestHTTPWrapperIntgSuite(t *testing.T) { + suite.Run(t, &HTTPWrapperIntgSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tester.M365AcctCredEnvs}), + }) +} + +func (suite *HTTPWrapperIntgSuite) TestNewHTTPWrapper() { + ctx, flush := tester.NewContext() + defer flush() + + var ( + t = suite.T() + hw = NewHTTPWrapper() + ) + + resp, err := hw.Request( + ctx, + http.MethodGet, + "https://www.corsobackup.io", + nil, + nil) + require.NoError(t, err, clues.ToCore(err)) + + defer resp.Body.Close() + + require.NotNil(t, resp) + require.Equal(t, http.StatusOK, resp.StatusCode) +} + +type mwForceResp struct { + err error + resp *http.Response + alternate func(*http.Request) (bool, *http.Response, error) +} + +func (mw *mwForceResp) Intercept( + pipeline khttp.Pipeline, + middlewareIndex int, + req *http.Request, +) (*http.Response, error) { + ok, r, e := mw.alternate(req) + if ok { + return r, e + } + + return mw.resp, mw.err +} + +type HTTPWrapperUnitSuite struct { + tester.Suite +} + +func TestHTTPWrapperUnitSuite(t *testing.T) { + suite.Run(t, &HTTPWrapperUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *HTTPWrapperUnitSuite) TestNewHTTPWrapper_redirectMiddleware() { + ctx, flush := tester.NewContext() + defer flush() + + var ( + t = suite.T() + uri = "https://graph.microsoft.com" + path = "/fnords/beaux/regard" + url = uri + path + ) + + // can't use gock for this, or else it'll short-circuit the transport, + // and thus skip all the middleware + hdr := http.Header{} + hdr.Set("Location", "localhost:99999999/smarfs") + + toResp := &http.Response{ + StatusCode: 302, + Header: hdr, + } + + mwResp := mwForceResp{ + resp: toResp, + alternate: func(req *http.Request) (bool, *http.Response, error) { + if strings.HasSuffix(req.URL.String(), "smarfs") { + return true, &http.Response{StatusCode: http.StatusOK}, nil + } + + return false, nil, nil + }, + } + + hw := NewHTTPWrapper(appendMiddleware(&mwResp)) + + resp, err := hw.Request(ctx, http.MethodGet, url, nil, nil) + require.NoError(t, err, clues.ToCore(err)) + + defer resp.Body.Close() + + require.NotNil(t, resp) + // require.Equal(t, 1, calledCorrectly, "test server was called with expected path") + require.Equal(t, http.StatusOK, resp.StatusCode) +} diff --git a/src/internal/connector/graph/metadata/metadata.go b/src/internal/connector/graph/metadata/metadata.go index cb08f7695..6aa0d5fa6 100644 --- a/src/internal/connector/graph/metadata/metadata.go +++ b/src/internal/connector/graph/metadata/metadata.go @@ -10,6 +10,9 @@ func IsMetadataFile(p path.Path) bool { case path.OneDriveService: return metadata.HasMetaSuffix(p.Item()) + case path.SharePointService: + return p.Category() == path.LibrariesCategory && metadata.HasMetaSuffix(p.Item()) + default: return false } diff --git a/src/internal/connector/graph/metadata/metadata_test.go b/src/internal/connector/graph/metadata/metadata_test.go index 94a2adc1d..2abef52d3 100644 --- 
a/src/internal/connector/graph/metadata/metadata_test.go +++ b/src/internal/connector/graph/metadata/metadata_test.go @@ -61,7 +61,7 @@ var ( { service: path.SharePointService, category: path.LibrariesCategory, - expected: assert.Falsef, + expected: assert.Truef, }, { service: path.SharePointService, diff --git a/src/internal/connector/graph/middleware.go b/src/internal/connector/graph/middleware.go index bedfbd932..03d3a109f 100644 --- a/src/internal/connector/graph/middleware.go +++ b/src/internal/connector/graph/middleware.go @@ -2,7 +2,7 @@ package graph import ( "context" - "fmt" + "io" "net/http" "net/http/httputil" "os" @@ -13,13 +13,17 @@ import ( "github.com/alcionai/clues" backoff "github.com/cenkalti/backoff/v4" khttp "github.com/microsoft/kiota-http-go" - "golang.org/x/time/rate" + "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/pii" "github.com/alcionai/corso/src/internal/events" "github.com/alcionai/corso/src/pkg/logger" ) +type nexter interface { + Next(req *http.Request, middlewareIndex int) (*http.Response, error) +} + // --------------------------------------------------------------------------- // Logging // --------------------------------------------------------------------------- @@ -29,6 +33,7 @@ type LoggingMiddleware struct{} // well-known path names used by graph api calls // used to un-hide path elements in a pii.SafeURL +// https://learn.microsoft.com/en-us/graph/api/resources/mailfolder?view=graph-rest-1.0 var SafeURLPathParams = pii.MapWithPlurals( //nolint:misspell "alltime", @@ -42,11 +47,16 @@ var SafeURLPathParams = pii.MapWithPlurals( "childfolder", "children", "clone", + "clutter", "column", + "conflict", "contactfolder", "contact", "contenttype", + "conversationhistory", + "deleteditem", "delta", + "draft", "drive", "event", "group", @@ -55,18 +65,28 @@ var SafeURLPathParams = pii.MapWithPlurals( "invitation", "item", "joinedteam", + "junkemail", "label", "list", + "localfailure", "mailfolder", "member", "message", + "msgfolderroot", "notification", + "outbox", "page", "primarychannel", + "recoverableitemsdeletion", "root", + "scheduled", + "searchfolder", "security", + "sentitem", + "serverfailure", "site", "subscription", + "syncissue", "team", "unarchive", "user", @@ -94,64 +114,82 @@ func LoggableURL(url string) pii.SafeURL { } } -func (handler *LoggingMiddleware) Intercept( +// 1 MB +const logMBLimit = 1 * 1048576 + +func (mw *LoggingMiddleware) Intercept( pipeline khttp.Pipeline, middlewareIndex int, req *http.Request, ) (*http.Response, error) { - ctx := clues.Add( - req.Context(), - "method", req.Method, - "url", LoggableURL(req.URL.String()), - "request_len", req.ContentLength) - // call the next middleware resp, err := pipeline.Next(req, middlewareIndex) - - if strings.Contains(req.URL.String(), "users//") { - logger.Ctx(ctx).Error("malformed request url: missing resource") - } - if resp == nil { return resp, err } - ctx = clues.Add(ctx, "status", resp.Status, "statusCode", resp.StatusCode) - log := logger.Ctx(ctx) + ctx := clues.Add( + req.Context(), + "method", req.Method, + "url", LoggableURL(req.URL.String()), + "request_content_len", req.ContentLength, + "resp_status", resp.Status, + "resp_status_code", resp.StatusCode, + "resp_content_len", resp.ContentLength) - // Return immediately if the response is good (2xx). - // If api logging is toggled, log a body-less dump of the request/resp. 
- if (resp.StatusCode / 100) == 2 { - if logger.DebugAPIFV || os.Getenv(log2xxGraphRequestsEnvKey) != "" { - log.Debugw("2xx graph api resp", "response", getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "")) + var ( + log = logger.Ctx(ctx) + respClass = resp.StatusCode / 100 + logExtra = logger.DebugAPIFV || os.Getenv(logGraphRequestsEnvKey) != "" + ) + + // special case: always info log 429 responses + if resp.StatusCode == http.StatusTooManyRequests { + if logExtra { + log = log.With("response", getRespDump(ctx, resp, true)) } - return resp, err - } - - // Log errors according to api debugging configurations. - // When debugging is toggled, every non-2xx is recorded with a response dump. - // Otherwise, throttling cases and other non-2xx responses are logged - // with a slimmer reference for telemetry/supportability purposes. - if logger.DebugAPIFV || os.Getenv(logGraphRequestsEnvKey) != "" { - log.Errorw("non-2xx graph api response", "response", getRespDump(ctx, resp, true)) - return resp, err - } - - msg := fmt.Sprintf("graph api error: %s", resp.Status) - - // special case for supportability: log all throttling cases. - if resp.StatusCode == http.StatusTooManyRequests { - log = log.With( + log.Infow( + "graph api throttling", "limit", resp.Header.Get(rateLimitHeader), "remaining", resp.Header.Get(rateRemainingHeader), "reset", resp.Header.Get(rateResetHeader), "retry-after", resp.Header.Get(retryAfterHeader)) - } else if resp.StatusCode/100 == 4 || resp.StatusCode == http.StatusServiceUnavailable { - log = log.With("response", getRespDump(ctx, resp, true)) + + return resp, err } - log.Info(msg) + // special case: always dump status-400-bad-request + if resp.StatusCode == http.StatusBadRequest { + log.With("response", getRespDump(ctx, resp, true)). + Error("graph api error: " + resp.Status) + + return resp, err + } + + // Log api calls according to api debugging configurations. + switch respClass { + case 2: + if logExtra { + // only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log. + dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit) + log.Infow("2xx graph api resp", "response", dump) + } + case 3: + log = log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))) + + if logExtra { + log = log.With("response", getRespDump(ctx, resp, false)) + } + + log.Info("graph api redirect: " + resp.Status) + default: + if logExtra { + log = log.With("response", getRespDump(ctx, resp, true)) + } + + log.Error("graph api error: " + resp.Status) + } return resp, err } @@ -169,15 +207,53 @@ func getRespDump(ctx context.Context, resp *http.Response, getBody bool) string // Retry & Backoff // --------------------------------------------------------------------------- -// RetryHandler handles transient HTTP responses and retries the request given the retry options -type RetryHandler struct { +// RetryMiddleware handles transient HTTP responses and retries the request given the retry options +type RetryMiddleware struct { // The maximum number of times a request can be retried MaxRetries int // The delay in seconds between retries Delay time.Duration } -func (middleware RetryHandler) retryRequest( +// Intercept implements the interface and evaluates whether to retry a failed request. 
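+// A failed request is retried only when the pipeline error is a timeout or
+// connection reset, or the response carries a retriable status (500, 502, or
+// an error body reporting an IO read failure), and the request itself is
+// retriable. Retries use exponential backoff seeded from Delay and stop after
+// MaxRetries attempts.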
+func (mw RetryMiddleware) Intercept( + pipeline khttp.Pipeline, + middlewareIndex int, + req *http.Request, +) (*http.Response, error) { + ctx := req.Context() + + resp, err := pipeline.Next(req, middlewareIndex) + if err != nil && !IsErrTimeout(err) && !IsErrConnectionReset(err) { + return resp, stackReq(ctx, req, resp, err) + } + + if resp != nil && resp.StatusCode/100 != 4 && resp.StatusCode/100 != 5 { + return resp, err + } + + exponentialBackOff := backoff.NewExponentialBackOff() + exponentialBackOff.InitialInterval = mw.Delay + exponentialBackOff.Reset() + + resp, err = mw.retryRequest( + ctx, + pipeline, + middlewareIndex, + req, + resp, + 0, + 0, + exponentialBackOff, + err) + if err != nil { + return nil, stackReq(ctx, req, resp, err) + } + + return resp, nil +} + +func (mw RetryMiddleware) retryRequest( ctx context.Context, pipeline khttp.Pipeline, middlewareIndex int, @@ -186,15 +262,31 @@ func (middleware RetryHandler) retryRequest( executionCount int, cumulativeDelay time.Duration, exponentialBackoff *backoff.ExponentialBackOff, - respErr error, + priorErr error, ) (*http.Response, error) { - if (respErr != nil || middleware.isRetriableErrorCode(req, resp.StatusCode)) && - middleware.isRetriableRequest(req) && - executionCount < middleware.MaxRetries { + status := "unknown_resp_status" + statusCode := -1 + + if resp != nil { + status = resp.Status + statusCode = resp.StatusCode + } + + ctx = clues.Add( + ctx, + "prev_resp_status", status, + "retry_count", executionCount) + + // only retry under certain conditions: + // 1, there was an error. 2, the resp and/or status code match retriable conditions. + // 3, the request is retriable. + // 4, we haven't hit our max retries already. + if (priorErr != nil || mw.isRetriableRespCode(ctx, resp, statusCode)) && + mw.isRetriableRequest(req) && + executionCount < mw.MaxRetries { executionCount++ - delay := middleware.getRetryDelay(req, resp, exponentialBackoff) - + delay := mw.getRetryDelay(req, resp, exponentialBackoff) cumulativeDelay += delay req.Header.Set(retryAttemptHeader, strconv.Itoa(executionCount)) @@ -205,41 +297,70 @@ func (middleware RetryHandler) retryRequest( case <-ctx.Done(): // Don't retry if the context is marked as done, it will just error out // when we attempt to send the retry anyway. - return resp, ctx.Err() + return resp, clues.Stack(ctx.Err()).WithClues(ctx) - // Will exit switch-block so the remainder of the code doesn't need to be - // indented. case <-timer.C: } - response, err := pipeline.Next(req, middlewareIndex) - if err != nil && !IsErrTimeout(err) && !IsErrConnectionReset(err) { - return response, Stack(ctx, err).With("retry_count", executionCount) + // we have to reset the original body reader for each retry, or else the graph + // compressor will produce a 0 length body following an error response such + // as a 500. 
+ if req.Body != nil { + if s, ok := req.Body.(io.Seeker); ok { + _, err := s.Seek(0, io.SeekStart) + if err != nil { + return nil, Wrap(ctx, err, "resetting request body reader") + } + } } - return middleware.retryRequest(ctx, + nextResp, err := pipeline.Next(req, middlewareIndex) + if err != nil && !IsErrTimeout(err) && !IsErrConnectionReset(err) { + return nextResp, stackReq(ctx, req, nextResp, err) + } + + return mw.retryRequest(ctx, pipeline, middlewareIndex, req, - response, + nextResp, executionCount, cumulativeDelay, exponentialBackoff, err) } - if respErr != nil { - return nil, Stack(ctx, respErr).With("retry_count", executionCount) + if priorErr != nil { + return nil, stackReq(ctx, req, nil, priorErr) } return resp, nil } -func (middleware RetryHandler) isRetriableErrorCode(req *http.Request, code int) bool { - return code == http.StatusInternalServerError || code == http.StatusServiceUnavailable +var retryableRespCodes = []int{ + http.StatusInternalServerError, + http.StatusBadGateway, } -func (middleware RetryHandler) isRetriableRequest(req *http.Request) bool { +func (mw RetryMiddleware) isRetriableRespCode(ctx context.Context, resp *http.Response, code int) bool { + if slices.Contains(retryableRespCodes, code) { + return true + } + + // prevent the body dump below in case of a 2xx response. + // There's no reason to check the body on a healthy status. + if code/100 != 4 && code/100 != 5 { + return false + } + + // not a status code, but the message itself might indicate a connectivity issue that + // can be retried independent of the status code. + return strings.Contains( + strings.ToLower(getRespDump(ctx, resp, true)), + strings.ToLower(string(IOErrDuringRead))) +} + +func (mw RetryMiddleware) isRetriableRequest(req *http.Request) bool { isBodiedMethod := req.Method == "POST" || req.Method == "PUT" || req.Method == "PATCH" if isBodiedMethod && req.Body != nil { return req.ContentLength != -1 @@ -248,7 +369,7 @@ func (middleware RetryHandler) isRetriableRequest(req *http.Request) bool { return true } -func (middleware RetryHandler) getRetryDelay( +func (mw RetryMiddleware) getRetryDelay( req *http.Request, resp *http.Response, exponentialBackoff *backoff.ExponentialBackOff, @@ -268,88 +389,16 @@ func (middleware RetryHandler) getRetryDelay( return exponentialBackoff.NextBackOff() } -// Intercept implements the interface and evaluates whether to retry a failed request. -func (middleware RetryHandler) Intercept( - pipeline khttp.Pipeline, - middlewareIndex int, - req *http.Request, -) (*http.Response, error) { - ctx := req.Context() - - response, err := pipeline.Next(req, middlewareIndex) - if err != nil && !IsErrTimeout(err) { - return response, Stack(ctx, err) - } - - exponentialBackOff := backoff.NewExponentialBackOff() - exponentialBackOff.InitialInterval = middleware.Delay - exponentialBackOff.Reset() - - response, err = middleware.retryRequest( - ctx, - pipeline, - middlewareIndex, - req, - response, - 0, - 0, - exponentialBackOff, - err) - if err != nil { - return nil, Stack(ctx, err) - } - - return response, nil -} - -// We're trying to keep calls below the 10k-per-10-minute threshold. -// 15 tokens every second nets 900 per minute. That's 9000 every 10 minutes, -// which is a bit below the mark. -// But suppose we have a minute-long dry spell followed by a 10 minute tsunami. -// We'll have built up 900 tokens in reserve, so the first 900 calls go through -// immediately. 
Over the next 10 minutes, we'll partition out the other calls -// at a rate of 900-per-minute, ending at a total of 9900. Theoretically, if -// the volume keeps up after that, we'll always stay between 9000 and 9900 out -// of 10k. -const ( - perSecond = 15 - maxCap = 900 -) - -// Single, global rate limiter at this time. Refinements for method (creates, -// versus reads) or service can come later. -var limiter = rate.NewLimiter(perSecond, maxCap) - -// QueueRequest will allow the request to occur immediately if we're under the -// 1k-calls-per-minute rate. Otherwise, the call will wait in a queue until -// the next token set is available. -func QueueRequest(ctx context.Context) { - if err := limiter.Wait(ctx); err != nil { - logger.CtxErr(ctx, err).Error("graph middleware waiting on the limiter") - } -} - // --------------------------------------------------------------------------- -// Rate Limiting +// Metrics // --------------------------------------------------------------------------- -// ThrottleControlMiddleware is used to ensure we don't overstep 10k-per-10-min -// request limits. -type ThrottleControlMiddleware struct{} - -func (handler *ThrottleControlMiddleware) Intercept( - pipeline khttp.Pipeline, - middlewareIndex int, - req *http.Request, -) (*http.Response, error) { - QueueRequest(req.Context()) - return pipeline.Next(req, middlewareIndex) -} - // MetricsMiddleware aggregates per-request metrics on the events bus type MetricsMiddleware struct{} -func (handler *MetricsMiddleware) Intercept( +const xmruHeader = "x-ms-resource-unit" + +func (mw *MetricsMiddleware) Intercept( pipeline khttp.Pipeline, middlewareIndex int, req *http.Request, @@ -360,6 +409,10 @@ func (handler *MetricsMiddleware) Intercept( status = "nil-resp" ) + if resp == nil { + return resp, err + } + if resp != nil { status = resp.Status } @@ -369,5 +422,18 @@ func (handler *MetricsMiddleware) Intercept( events.Since(start, events.APICall) events.Since(start, events.APICall, status) + // track the graph "resource cost" for each call (if not provided, assume 1) + + // from msoft throttling documentation: + // x-ms-resource-unit - Indicates the resource unit used for this request. 
Values are positive integer + xmru := resp.Header.Get(xmruHeader) + xmrui, e := strconv.Atoi(xmru) + + if len(xmru) == 0 || e != nil { + xmrui = 1 + } + + events.IncN(xmrui, events.APICall, xmruHeader) + return resp, err } diff --git a/src/internal/connector/graph/middleware_test.go b/src/internal/connector/graph/middleware_test.go new file mode 100644 index 000000000..597c8d0a6 --- /dev/null +++ b/src/internal/connector/graph/middleware_test.go @@ -0,0 +1,339 @@ +package graph + +import ( + "bytes" + "io" + "net/http" + "testing" + "time" + + "github.com/alcionai/clues" + "github.com/google/uuid" + khttp "github.com/microsoft/kiota-http-go" + msgraphsdkgo "github.com/microsoftgraph/msgraph-sdk-go" + msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" + "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/microsoftgraph/msgraph-sdk-go/users" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "golang.org/x/time/rate" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/path" +) + +type mwReturns struct { + err error + resp *http.Response +} + +func newMWReturns(code int, body []byte, err error) mwReturns { + var brc io.ReadCloser + + if len(body) > 0 { + brc = io.NopCloser(bytes.NewBuffer(body)) + } + + return mwReturns{ + err: err, + resp: &http.Response{ + StatusCode: code, + Body: brc, + }, + } +} + +func newTestMW(onIntercept func(*http.Request), mrs ...mwReturns) *testMW { + return &testMW{ + onIntercept: onIntercept, + toReturn: mrs, + } +} + +type testMW struct { + repeatReturn0 bool + iter int + toReturn []mwReturns + onIntercept func(*http.Request) +} + +func (mw *testMW) Intercept( + pipeline khttp.Pipeline, + middlewareIndex int, + req *http.Request, +) (*http.Response, error) { + mw.onIntercept(req) + + i := mw.iter + if mw.repeatReturn0 { + i = 0 + } + + // panic on out-of-bounds intentionally not protected + tr := mw.toReturn[i] + + mw.iter++ + + return tr.resp, tr.err +} + +// can't use graph/mock.CreateAdapter() due to circular references. +func mockAdapter(creds account.M365Config, mw khttp.Middleware) (*msgraphsdkgo.GraphRequestAdapter, error) { + auth, err := GetAuth( + creds.AzureTenantID, + creds.AzureClientID, + creds.AzureClientSecret) + if err != nil { + return nil, err + } + + var ( + clientOptions = msgraphsdkgo.GetDefaultClientOptions() + cc = populateConfig(MinimumBackoff(10 * time.Millisecond)) + middlewares = append(kiotaMiddlewares(&clientOptions, cc), mw) + httpClient = msgraphgocore.GetDefaultClient(&clientOptions, middlewares...) 
+ ) + + httpClient.Timeout = 15 * time.Second + + cc.apply(httpClient) + + return msgraphsdkgo.NewGraphRequestAdapterWithParseNodeFactoryAndSerializationWriterFactoryAndHttpClient( + auth, + nil, nil, + httpClient) +} + +type RetryMWIntgSuite struct { + tester.Suite + creds account.M365Config +} + +// We do end up mocking the actual request, but creating the rest +// similar to E2E suite +func TestRetryMWIntgSuite(t *testing.T) { + suite.Run(t, &RetryMWIntgSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tester.M365AcctCredEnvs}), + }) +} + +func (suite *RetryMWIntgSuite) SetupSuite() { + var ( + a = tester.NewM365Account(suite.T()) + err error + ) + + suite.creds, err = a.M365Config() + require.NoError(suite.T(), err, clues.ToCore(err)) +} + +func (suite *RetryMWIntgSuite) TestRetryMiddleware_Intercept_byStatusCode() { + var ( + uri = "https://graph.microsoft.com" + urlPath = "/v1.0/users/user/messages/foo" + url = uri + urlPath + ) + + tests := []struct { + name string + status int + expectRetryCount int + mw testMW + expectErr assert.ErrorAssertionFunc + }{ + { + name: "200, no retries", + status: http.StatusOK, + expectRetryCount: 0, + expectErr: assert.NoError, + }, + { + name: "400, no retries", + status: http.StatusBadRequest, + expectRetryCount: 0, + expectErr: assert.Error, + }, + { + // don't test 504: gets intercepted by graph client for long waits. + name: "502", + status: http.StatusBadGateway, + expectRetryCount: defaultMaxRetries, + expectErr: assert.Error, + }, + } + + for _, test := range tests { + suite.Run(test.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + called := 0 + mw := newTestMW( + func(*http.Request) { called++ }, + newMWReturns(test.status, nil, nil)) + mw.repeatReturn0 = true + + adpt, err := mockAdapter(suite.creds, mw) + require.NoError(t, err, clues.ToCore(err)) + + // url doesn't fit the builder, but that shouldn't matter + _, err = users.NewCountRequestBuilder(url, adpt).Get(ctx, nil) + test.expectErr(t, err, clues.ToCore(err)) + + // -1 because the non-retried call always counts for one, then + // we increment based on the number of retry attempts. + assert.Equal(t, test.expectRetryCount, called-1) + }) + } +} + +func (suite *RetryMWIntgSuite) TestRetryMiddleware_RetryRequest_resetBodyAfter500() { + ctx, flush := tester.NewContext() + defer flush() + + var ( + t = suite.T() + body = models.NewMailFolder() + checkOnIntercept = func(req *http.Request) { + bs, err := io.ReadAll(req.Body) + require.NoError(t, err, clues.ToCore(err)) + + // an expired body, after graph compression, will + // normally contain 25 bytes. So we should see more + // than that at least. + require.Less( + t, + 25, + len(bs), + "body should be longer than 25 bytes; shorter indicates the body was sliced on a retry") + } + ) + + body.SetDisplayName(ptr.To(uuid.NewString())) + + mw := newTestMW( + checkOnIntercept, + newMWReturns(http.StatusInternalServerError, nil, nil), + newMWReturns(http.StatusOK, nil, nil)) + + adpt, err := mockAdapter(suite.creds, mw) + require.NoError(t, err, clues.ToCore(err)) + + _, err = NewService(adpt). + Client(). + Users(). + ByUserId("user"). + MailFolders(). 
+ Post(ctx, body, nil) + require.NoError(t, err, clues.ToCore(err)) +} + +type MiddlewareUnitSuite struct { + tester.Suite +} + +func TestMiddlewareUnitSuite(t *testing.T) { + suite.Run(t, &MiddlewareUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *MiddlewareUnitSuite) TestBindExtractLimiterConfig() { + ctx, flush := tester.NewContext() + defer flush() + + // an unpopulated ctx should produce the default limiter + assert.Equal(suite.T(), defaultLimiter, ctxLimiter(ctx)) + + table := []struct { + name string + service path.ServiceType + expectOK require.BoolAssertionFunc + expectLimiter *rate.Limiter + }{ + { + name: "exchange", + service: path.ExchangeService, + expectLimiter: defaultLimiter, + }, + { + name: "oneDrive", + service: path.OneDriveService, + expectLimiter: driveLimiter, + }, + { + name: "sharePoint", + service: path.SharePointService, + expectLimiter: driveLimiter, + }, + { + name: "unknownService", + service: path.UnknownService, + expectLimiter: defaultLimiter, + }, + { + name: "badService", + service: path.ServiceType(-1), + expectLimiter: defaultLimiter, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + tctx := BindRateLimiterConfig(ctx, LimiterCfg{Service: test.service}) + lc, ok := extractRateLimiterConfig(tctx) + require.True(t, ok, "found rate limiter in ctx") + assert.Equal(t, test.service, lc.Service) + assert.Equal(t, test.expectLimiter, ctxLimiter(tctx)) + }) + } +} + +func (suite *MiddlewareUnitSuite) TestLimiterConsumption() { + ctx, flush := tester.NewContext() + defer flush() + + // an unpopulated ctx should produce the default consumption + assert.Equal(suite.T(), defaultLC, ctxLimiterConsumption(ctx, defaultLC)) + + table := []struct { + name string + n int + expect int + }{ + { + name: "matches default", + n: defaultLC, + expect: defaultLC, + }, + { + name: "default+1", + n: defaultLC + 1, + expect: defaultLC + 1, + }, + { + name: "zero", + n: 0, + expect: defaultLC, + }, + { + name: "negative", + n: -1, + expect: defaultLC, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + tctx := ConsumeNTokens(ctx, test.n) + lc := ctxLimiterConsumption(tctx, defaultLC) + assert.Equal(t, test.expect, lc) + }) + } +} diff --git a/src/internal/connector/graph/mock/service.go b/src/internal/connector/graph/mock/service.go index 9a2a9b292..a44d9f1ca 100644 --- a/src/internal/connector/graph/mock/service.go +++ b/src/internal/connector/graph/mock/service.go @@ -1,12 +1,27 @@ package mock import ( + "github.com/alcionai/clues" "github.com/h2non/gock" msgraphsdkgo "github.com/microsoftgraph/msgraph-sdk-go" "github.com/alcionai/corso/src/internal/connector/graph" + "github.com/alcionai/corso/src/pkg/account" ) +func NewService(creds account.M365Config, opts ...graph.Option) (*graph.Service, error) { + a, err := CreateAdapter( + creds.AzureTenantID, + creds.AzureClientID, + creds.AzureClientSecret, + opts...) + if err != nil { + return nil, clues.Wrap(err, "generating graph adapter") + } + + return graph.NewService(a), nil +} + // CreateAdapter is similar to graph.CreateAdapter, but with option to // enable interceptions via gock to make it mockable. func CreateAdapter( @@ -18,7 +33,7 @@ func CreateAdapter( return nil, err } - httpClient := graph.HTTPClient(opts...) + httpClient := graph.KiotaHTTPClient(opts...) // This makes sure that we are able to intercept any requests via // gock. Only necessary for testing. 
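// Editor's sketch (not part of the change set above): one way the new
// graph/mock.NewService helper and the gock-enabled adapter might be wired up
// in a unit test. The package name, test name, and intercepted route are
// assumptions made for illustration; mock.NewService, tester.NewM365Account,
// and the gock interception itself come from the diffs above.
package mock_test

import (
	"testing"

	"github.com/h2non/gock"
	"github.com/stretchr/testify/require"

	"github.com/alcionai/corso/src/internal/connector/graph/mock"
	"github.com/alcionai/corso/src/internal/tester"
)

func TestNewService_withGockInterception(t *testing.T) {
	defer gock.Off()

	// Register a canned reply for a hypothetical Graph route; with the
	// gock-enabled adapter, matching requests are served from this stub
	// instead of the network.
	gock.New("https://graph.microsoft.com").
		Get("/v1.0/users").
		Reply(200).
		JSON(map[string]any{"value": []any{}})

	// Credentials are read from the test environment, mirroring the
	// integration suites in middleware_test.go.
	creds, err := tester.NewM365Account(t).M365Config()
	require.NoError(t, err)

	// NewService builds a graph.Service whose http transport can be
	// intercepted by gock; no live request is issued here.
	svc, err := mock.NewService(creds)
	require.NoError(t, err)
	require.NotNil(t, svc)
}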
diff --git a/src/internal/connector/graph/service.go b/src/internal/connector/graph/service.go index ff8b3a85d..f0aa71f08 100644 --- a/src/internal/connector/graph/service.go +++ b/src/internal/connector/graph/service.go @@ -12,7 +12,7 @@ import ( msgraphsdkgo "github.com/microsoftgraph/msgraph-sdk-go" msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/path" ) @@ -21,13 +21,14 @@ const ( logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS" log2xxGraphRequestsEnvKey = "LOG_2XX_GRAPH_REQUESTS" log2xxGraphResponseEnvKey = "LOG_2XX_GRAPH_RESPONSES" - retryAttemptHeader = "Retry-Attempt" - retryAfterHeader = "Retry-After" defaultMaxRetries = 3 defaultDelay = 3 * time.Second + locationHeader = "Location" rateLimitHeader = "RateLimit-Limit" rateRemainingHeader = "RateLimit-Remaining" rateResetHeader = "RateLimit-Reset" + retryAfterHeader = "Retry-After" + retryAttemptHeader = "Retry-Attempt" defaultHTTPClientTimeout = 1 * time.Hour ) @@ -39,7 +40,7 @@ func AllMetadataFileNames() []string { type QueryParams struct { Category path.CategoryType - ResourceOwner common.IDNamer + ResourceOwner idname.Provider Credentials account.M365Config } @@ -114,7 +115,7 @@ func CreateAdapter( return nil, err } - httpClient := HTTPClient(opts...) + httpClient := KiotaHTTPClient(opts...) return msgraphsdkgo.NewGraphRequestAdapterWithParseNodeFactoryAndSerializationWriterFactoryAndHttpClient( auth, @@ -140,21 +141,24 @@ func GetAuth(tenant string, client string, secret string) (*kauth.AzureIdentityA return auth, nil } -// HTTPClient creates the httpClient with middlewares and timeout configured +// KiotaHTTPClient creates a httpClient with middlewares and timeout configured +// for use in the graph adapter. // // Re-use of http clients is critical, or else we leak OS resources // and consume relatively unbound socket connections. It is important // to centralize this client to be passed downstream where api calls // can utilize it on a per-download basis. -func HTTPClient(opts ...Option) *http.Client { - clientOptions := msgraphsdkgo.GetDefaultClientOptions() - clientconfig := (&clientConfig{}).populate(opts...) - noOfRetries, minRetryDelay := clientconfig.applyMiddlewareConfig() - middlewares := GetKiotaMiddlewares(&clientOptions, noOfRetries, minRetryDelay) - httpClient := msgraphgocore.GetDefaultClient(&clientOptions, middlewares...) +func KiotaHTTPClient(opts ...Option) *http.Client { + var ( + clientOptions = msgraphsdkgo.GetDefaultClientOptions() + cc = populateConfig(opts...) + middlewares = kiotaMiddlewares(&clientOptions, cc) + httpClient = msgraphgocore.GetDefaultClient(&clientOptions, middlewares...) + ) + httpClient.Timeout = defaultHTTPClientTimeout - clientconfig.apply(httpClient) + cc.apply(httpClient) return httpClient } @@ -170,32 +174,24 @@ type clientConfig struct { // The minimum delay in seconds between retries minDelay time.Duration overrideRetryCount bool + + appendMiddleware []khttp.Middleware } type Option func(*clientConfig) // populate constructs a clientConfig according to the provided options. 
-func (c *clientConfig) populate(opts ...Option) *clientConfig { +func populateConfig(opts ...Option) *clientConfig { + cc := clientConfig{ + maxRetries: defaultMaxRetries, + minDelay: defaultDelay, + } + for _, opt := range opts { - opt(c) + opt(&cc) } - return c -} - -// apply updates the http.Client with the expected options. -func (c *clientConfig) applyMiddlewareConfig() (retry int, delay time.Duration) { - retry = defaultMaxRetries - if c.overrideRetryCount { - retry = c.maxRetries - } - - delay = defaultDelay - if c.minDelay > 0 { - delay = c.minDelay - } - - return + return &cc } // apply updates the http.Client with the expected options. @@ -232,18 +228,30 @@ func MinimumBackoff(dur time.Duration) Option { } } +func appendMiddleware(mw ...khttp.Middleware) Option { + return func(c *clientConfig) { + if len(mw) > 0 { + c.appendMiddleware = mw + } + } +} + // --------------------------------------------------------------------------- // Middleware Control // --------------------------------------------------------------------------- -// GetDefaultMiddlewares creates a new default set of middlewares for the Kiota request adapter -func GetMiddlewares(maxRetry int, delay time.Duration) []khttp.Middleware { - return []khttp.Middleware{ - &RetryHandler{ - // The maximum number of times a request can be retried - MaxRetries: maxRetry, - // The delay in seconds between retries - Delay: delay, +// kiotaMiddlewares creates a default slice of middleware for the Graph Client. +func kiotaMiddlewares( + options *msgraphgocore.GraphClientOptions, + cc *clientConfig, +) []khttp.Middleware { + mw := []khttp.Middleware{} + + mw = append(mw, []khttp.Middleware{ + msgraphgocore.NewGraphTelemetryHandler(options), + &RetryMiddleware{ + MaxRetries: cc.maxRetries, + Delay: cc.minDelay, }, khttp.NewRetryHandler(), khttp.NewRedirectHandler(), @@ -251,25 +259,21 @@ func GetMiddlewares(maxRetry int, delay time.Duration) []khttp.Middleware { khttp.NewParametersNameDecodingHandler(), khttp.NewUserAgentHandler(), &LoggingMiddleware{}, - &ThrottleControlMiddleware{}, - &MetricsMiddleware{}, - } -} + }...) -// GetKiotaMiddlewares creates a default slice of middleware for the Graph Client. -func GetKiotaMiddlewares( - options *msgraphgocore.GraphClientOptions, - maxRetry int, - minDelay time.Duration, -) []khttp.Middleware { - kiotaMiddlewares := GetMiddlewares(maxRetry, minDelay) - graphMiddlewares := []khttp.Middleware{ - msgraphgocore.NewGraphTelemetryHandler(options), + // Optionally add concurrency limiter middleware if it has been initialized. + if concurrencyLim != nil { + mw = append(mw, concurrencyLim) } - graphMiddlewaresLen := len(graphMiddlewares) - resultMiddlewares := make([]khttp.Middleware, len(kiotaMiddlewares)+graphMiddlewaresLen) - copy(resultMiddlewares, graphMiddlewares) - copy(resultMiddlewares[graphMiddlewaresLen:], kiotaMiddlewares) - return resultMiddlewares + mw = append( + mw, + &RateLimiterMiddleware{}, + &MetricsMiddleware{}) + + if len(cc.appendMiddleware) > 0 { + mw = append(mw, cc.appendMiddleware...) + } + + return mw } diff --git a/src/internal/connector/graph/service_test.go b/src/internal/connector/graph/service_test.go index 4565efca1..9d4aad624 100644 --- a/src/internal/connector/graph/service_test.go +++ b/src/internal/connector/graph/service_test.go @@ -70,7 +70,7 @@ func (suite *GraphUnitSuite) TestHTTPClient() { suite.Run(test.name, func() { t := suite.T() - cli := HTTPClient(test.opts...) + cli := KiotaHTTPClient(test.opts...) 
assert.NotNil(t, cli) test.check(t, cli) }) diff --git a/src/internal/connector/uploadsession/uploadsession.go b/src/internal/connector/graph/uploadsession.go similarity index 67% rename from src/internal/connector/uploadsession/uploadsession.go rename to src/internal/connector/graph/uploadsession.go index 210abe018..77fefd5c8 100644 --- a/src/internal/connector/uploadsession/uploadsession.go +++ b/src/internal/connector/graph/uploadsession.go @@ -1,12 +1,12 @@ -package uploadsession +package graph import ( "bytes" "context" "fmt" + "net/http" "github.com/alcionai/clues" - "gopkg.in/resty.v1" "github.com/alcionai/corso/src/pkg/logger" ) @@ -20,7 +20,7 @@ const ( // Writer implements an io.Writer for a M365 // UploadSession URL -type writer struct { +type largeItemWriter struct { // Identifier id string // Upload URL for this item @@ -29,18 +29,20 @@ type writer struct { contentLength int64 // Last item offset that was written to lastWrittenOffset int64 - client *resty.Client + client httpWrapper } -func NewWriter(id, url string, size int64) *writer { - return &writer{id: id, url: url, contentLength: size, client: resty.New()} +func NewLargeItemWriter(id, url string, size int64) *largeItemWriter { + return &largeItemWriter{id: id, url: url, contentLength: size, client: *NewNoTimeoutHTTPWrapper()} } // Write will upload the provided data to M365. It sets the `Content-Length` and `Content-Range` headers based on // https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession -func (iw *writer) Write(p []byte) (int, error) { +func (iw *largeItemWriter) Write(p []byte) (int, error) { rangeLength := len(p) - logger.Ctx(context.Background()). + ctx := context.Background() + + logger.Ctx(ctx). Debugf("WRITE for %s. Size:%d, Offset: %d, TotalSize: %d", iw.id, rangeLength, iw.lastWrittenOffset, iw.contentLength) @@ -48,17 +50,20 @@ func (iw *writer) Write(p []byte) (int, error) { // PUT the request - set headers `Content-Range`to describe total size and `Content-Length` to describe size of // data in the current request - _, err := iw.client.R(). - SetHeaders(map[string]string{ - contentRangeHeaderKey: fmt.Sprintf( - contentRangeHeaderValueFmt, - iw.lastWrittenOffset, - endOffset-1, - iw.contentLength), - contentLengthHeaderKey: fmt.Sprintf("%d", rangeLength), - }). - SetBody(bytes.NewReader(p)). 
- Put(iw.url) + headers := make(map[string]string) + headers[contentRangeHeaderKey] = fmt.Sprintf( + contentRangeHeaderValueFmt, + iw.lastWrittenOffset, + endOffset-1, + iw.contentLength) + headers[contentLengthHeaderKey] = fmt.Sprintf("%d", rangeLength) + + _, err := iw.client.Request( + ctx, + http.MethodPut, + iw.url, + bytes.NewReader(p), + headers) if err != nil { return 0, clues.Wrap(err, "uploading item").With( "upload_id", iw.id, diff --git a/src/internal/connector/uploadsession/uploadsession_test.go b/src/internal/connector/graph/uploadsession_test.go similarity index 97% rename from src/internal/connector/uploadsession/uploadsession_test.go rename to src/internal/connector/graph/uploadsession_test.go index f080affa3..74d2c71c9 100644 --- a/src/internal/connector/uploadsession/uploadsession_test.go +++ b/src/internal/connector/graph/uploadsession_test.go @@ -1,4 +1,4 @@ -package uploadsession +package graph import ( "bytes" @@ -69,7 +69,7 @@ func (suite *UploadSessionSuite) TestWriter() { defer ts.Close() - writer := NewWriter("item", ts.URL, writeSize) + writer := NewLargeItemWriter("item", ts.URL, writeSize) // Using a 32 KB buffer for the copy allows us to validate the // multi-part upload. `io.CopyBuffer` will only write 32 KB at diff --git a/src/internal/connector/graph_connector.go b/src/internal/connector/graph_connector.go index 94e9e1634..752037b34 100644 --- a/src/internal/connector/graph_connector.go +++ b/src/internal/connector/graph_connector.go @@ -4,20 +4,18 @@ package connector import ( "context" - "net/http" "runtime/trace" "sync" "github.com/alcionai/clues" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/pkg/account" - "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/services/m365/api" + m365api "github.com/alcionai/corso/src/pkg/services/m365/api" ) // --------------------------------------------------------------------------- @@ -35,8 +33,8 @@ var ( // bookkeeping and interfacing with other component. type GraphConnector struct { Service graph.Servicer - Discovery api.Client - itemClient *http.Client // configured to handle large item downloads + Discovery m365api.Client + itemClient graph.Requester // configured to handle large item downloads tenant string credentials account.M365Config @@ -45,7 +43,7 @@ type GraphConnector struct { // maps of resource owner ids to names, and names to ids. // not guaranteed to be populated, only here as a post-population // reference for processes that choose to populate the values. 
- IDNameLookup common.IDNameSwapper + IDNameLookup idname.Cacher // wg is used to track completion of GC tasks wg *sync.WaitGroup @@ -59,8 +57,7 @@ type GraphConnector struct { func NewGraphConnector( ctx context.Context, acct account.Account, - r resource, - errs *fault.Bus, + r Resource, ) (*GraphConnector, error) { creds, err := acct.M365Config() if err != nil { @@ -72,23 +69,23 @@ func NewGraphConnector( return nil, clues.Wrap(err, "creating service connection").WithClues(ctx) } - discovery, err := api.NewClient(creds) + ac, err := m365api.NewClient(creds) if err != nil { return nil, clues.Wrap(err, "creating api client").WithClues(ctx) } - rc, err := r.resourceClient(discovery) + rc, err := r.resourceClient(ac) if err != nil { return nil, clues.Wrap(err, "creating resource client").WithClues(ctx) } gc := GraphConnector{ - Discovery: discovery, - IDNameLookup: common.IDsNames{}, + Discovery: ac, + IDNameLookup: idname.NewCache(nil), Service: service, credentials: creds, - itemClient: graph.HTTPClient(graph.NoTimeout()), + itemClient: graph.NewNoTimeoutHTTPWrapper(), ownerLookup: rc, tenant: acct.ID(), wg: &sync.WaitGroup{}, @@ -170,36 +167,32 @@ func (gc *GraphConnector) incrementAwaitingMessages() { gc.wg.Add(1) } -func (gc *GraphConnector) incrementMessagesBy(num int) { - gc.wg.Add(num) -} - // --------------------------------------------------------------------------- // Resource Lookup Handling // --------------------------------------------------------------------------- -type resource int +type Resource int const ( - UnknownResource resource = iota + UnknownResource Resource = iota AllResources // unused Users Sites ) -func (r resource) resourceClient(discovery api.Client) (*resourceClient, error) { +func (r Resource) resourceClient(ac m365api.Client) (*resourceClient, error) { switch r { case Users: - return &resourceClient{enum: r, getter: discovery.Users()}, nil + return &resourceClient{enum: r, getter: ac.Users()}, nil case Sites: - return &resourceClient{enum: r, getter: discovery.Sites()}, nil + return &resourceClient{enum: r, getter: ac.Sites()}, nil default: return nil, clues.New("unrecognized owner resource enum").With("resource_enum", r) } } type resourceClient struct { - enum resource + enum Resource getter getIDAndNamer } @@ -216,9 +209,9 @@ var _ getOwnerIDAndNamer = &resourceClient{} type getOwnerIDAndNamer interface { getOwnerIDAndNameFrom( ctx context.Context, - discovery api.Client, + discovery m365api.Client, owner string, - ins common.IDNameSwapper, + ins idname.Cacher, ) ( ownerID string, ownerName string, @@ -234,9 +227,9 @@ type getOwnerIDAndNamer interface { // (PrincipalName for users, WebURL for sites). 
func (r resourceClient) getOwnerIDAndNameFrom( ctx context.Context, - discovery api.Client, + discovery m365api.Client, owner string, - ins common.IDNameSwapper, + ins idname.Cacher, ) (string, string, error) { if ins != nil { if n, ok := ins.NameOf(owner); ok { @@ -280,18 +273,14 @@ func (r resourceClient) getOwnerIDAndNameFrom( func (gc *GraphConnector) PopulateOwnerIDAndNamesFrom( ctx context.Context, owner string, // input value, can be either id or name - ins common.IDNameSwapper, + ins idname.Cacher, ) (string, string, error) { - // move this to GC method id, name, err := gc.ownerLookup.getOwnerIDAndNameFrom(ctx, gc.Discovery, owner, ins) if err != nil { return "", "", clues.Wrap(err, "identifying resource owner") } - gc.IDNameLookup = common.IDsNames{ - IDToName: map[string]string{id: name}, - NameToID: map[string]string{name: id}, - } + gc.IDNameLookup = idname.NewCache(map[string]string{id: name}) return id, name, nil } diff --git a/src/internal/connector/graph_connector_disconnected_test.go b/src/internal/connector/graph_connector_disconnected_test.go index b95f75335..23a6ab1dc 100644 --- a/src/internal/connector/graph_connector_disconnected_test.go +++ b/src/internal/connector/graph_connector_disconnected_test.go @@ -11,6 +11,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/selectors" + selTD "github.com/alcionai/corso/src/pkg/selectors/testdata" ) // --------------------------------------------------------------- @@ -82,19 +83,19 @@ func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs_allServices checkError: assert.NoError, excludes: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"}) - sel.Exclude(sel.Folders(selectors.Any())) + sel.Exclude(selTD.OneDriveBackupFolderScope(sel)) sel.DiscreteOwner = "elliotReid@someHospital.org" return sel.Selector }, filters: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"}) - sel.Filter(sel.Folders(selectors.Any())) + sel.Filter(selTD.OneDriveBackupFolderScope(sel)) sel.DiscreteOwner = "elliotReid@someHospital.org" return sel.Selector }, includes: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"}) - sel.Include(sel.Folders(selectors.Any())) + sel.Include(selTD.OneDriveBackupFolderScope(sel)) sel.DiscreteOwner = "elliotReid@someHospital.org" return sel.Selector }, @@ -104,17 +105,17 @@ func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs_allServices checkError: assert.NoError, excludes: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"}) - sel.Exclude(sel.Folders(selectors.Any())) + sel.Exclude(selTD.OneDriveBackupFolderScope(sel)) return sel.Selector }, filters: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"}) - sel.Filter(sel.Folders(selectors.Any())) + sel.Filter(selTD.OneDriveBackupFolderScope(sel)) return sel.Selector }, includes: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"}) - sel.Include(sel.Folders(selectors.Any())) + sel.Include(selTD.OneDriveBackupFolderScope(sel)) return sel.Selector }, }, diff --git a/src/internal/connector/graph_connector_helper_test.go 
b/src/internal/connector/graph_connector_helper_test.go index 3a60bc701..4776c93f8 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -1,7 +1,6 @@ package connector import ( - "bytes" "context" "encoding/json" "io" @@ -17,44 +16,31 @@ import ( "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/ptr" - exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" + "github.com/alcionai/corso/src/internal/connector/exchange" "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" - "github.com/alcionai/corso/src/pkg/account" - "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) -func mustToDataLayerPath( - t *testing.T, - service path.ServiceType, - tenant, resourceOwner string, - category path.CategoryType, - elements []string, - isItem bool, -) path.Path { - res, err := path.Build(tenant, resourceOwner, service, category, isItem, elements...) - require.NoError(t, err, clues.ToCore(err)) - - return res -} - func testElementsMatch[T any]( t *testing.T, expected []T, got []T, + subset bool, equalityCheck func(expectedItem, gotItem T) bool, ) { t.Helper() pending := make([]*T, len(expected)) - for i := 0; i < len(expected); i++ { - pending[i] = &expected[i] + + for i := range expected { + ei := expected[i] + pending[i] = &ei } unexpected := []T{} @@ -97,64 +83,34 @@ func testElementsMatch[T any]( return } + if subset && len(missing) == 0 && len(unexpected) > 0 { + return + } + assert.Failf( t, - "contain different elements", - "missing items: (%T)%v\nunexpected items: (%T)%v\n", + "elements differ", + "expected: (%T)%+v\ngot: (%T)%+v\nmissing: %+v\nextra: %+v\n", + expected, expected, - missing, got, - unexpected, - ) -} - -type configInfo struct { - acct account.Account - opts control.Options - resource resource - service path.ServiceType - tenant string - resourceOwners []string - dest control.RestoreDestination -} - -type itemInfo struct { - // lookupKey is a string that can be used to find this data from a set of - // other data in the same collection. This key should be something that will - // be the same before and after restoring the item in M365 and may not be - // the M365 ID. When restoring items out of place, the item is assigned a - // new ID making it unsuitable for a lookup key. - lookupKey string - name string - data []byte -} - -type colInfo struct { - // Elements (in order) for the path representing this collection. Should - // only contain elements after the prefix that corso uses for the path. For - // example, a collection for the Inbox folder in exchange mail would just be - // "Inbox". - pathElements []string - category path.CategoryType - items []itemInfo - // auxItems are items that can be retrieved with Fetch but won't be returned - // by Items(). These files do not directly participate in comparisosn at the - // end of a test. 
- auxItems []itemInfo + got, + missing, + unexpected) } type restoreBackupInfo struct { name string service path.ServiceType - collections []colInfo - resource resource + collections []ColInfo + resource Resource } type restoreBackupInfoMultiVersion struct { service path.ServiceType - collectionsLatest []colInfo - collectionsPrevious []colInfo - resource resource + collectionsLatest []ColInfo + collectionsPrevious []ColInfo + resource Resource backupVersion int } @@ -162,11 +118,15 @@ func attachmentEqual( expected models.Attachmentable, got models.Attachmentable, ) bool { - // This is super hacky, but seems like it would be good to have a comparison - // of the actual content. I think the only other way to really get it is to - // serialize both structs to JSON and pull it from there or something though. - expectedData := reflect.Indirect(reflect.ValueOf(expected)).FieldByName("contentBytes").Bytes() - gotData := reflect.Indirect(reflect.ValueOf(got)).FieldByName("contentBytes").Bytes() + expectedData, err := exchange.GetAttachmentBytes(expected) + if err != nil { + return false + } + + gotData, err := exchange.GetAttachmentBytes(got) + if err != nil { + return false + } if !reflect.DeepEqual(expectedData, gotData) { return false @@ -211,7 +171,7 @@ func checkMessage( expected models.Messageable, got models.Messageable, ) { - testElementsMatch(t, expected.GetAttachments(), got.GetAttachments(), attachmentEqual) + testElementsMatch(t, expected.GetAttachments(), got.GetAttachments(), false, attachmentEqual) assert.Equal(t, expected.GetBccRecipients(), got.GetBccRecipients(), "BccRecipients") @@ -289,7 +249,7 @@ func checkMessage( assert.Equal(t, ptr.Val(expected.GetSubject()), ptr.Val(got.GetSubject()), "Subject") - testElementsMatch(t, expected.GetToRecipients(), got.GetToRecipients(), recipientEqual) + testElementsMatch(t, expected.GetToRecipients(), got.GetToRecipients(), false, recipientEqual) // Skip WebLink as it's tied to this specific instance of the item. @@ -535,10 +495,10 @@ func checkEvent( t, []models.Locationable{expected.GetLocation()}, []models.Locationable{got.GetLocation()}, - locationEqual, - ) + false, + locationEqual) - testElementsMatch(t, expected.GetLocations(), got.GetLocations(), locationEqual) + testElementsMatch(t, expected.GetLocations(), got.GetLocations(), false, locationEqual) assert.Equal(t, expected.GetOnlineMeeting(), got.GetOnlineMeeting(), "OnlineMeeting") @@ -693,7 +653,7 @@ func compareExchangeEvent( checkEvent(t, expectedEvent, itemEvent) } -func permissionEqual(expected onedrive.UserPermission, got onedrive.UserPermission) bool { +func permissionEqual(expected metadata.Permission, got metadata.Permission) bool { if !strings.EqualFold(expected.Email, got.Email) { return false } @@ -722,14 +682,14 @@ func permissionEqual(expected onedrive.UserPermission, got onedrive.UserPermissi return true } -func compareOneDriveItem( +func compareDriveItem( t *testing.T, expected map[string][]byte, item data.Stream, - restorePermissions bool, + config ConfigInfo, rootDir bool, ) bool { - // Skip OneDrive permissions in the folder that used to be the root. We don't + // Skip Drive permissions in the folder that used to be the root. We don't // have a good way to materialize these in the test right now. 
if rootDir && item.UUID() == metadata.DirMetaFileSuffix { return false @@ -746,19 +706,15 @@ func compareOneDriveItem( isMeta = metadata.HasMetaSuffix(name) ) - if isMeta { - var itemType *onedrive.MetadataItem - - assert.IsType(t, itemType, item) - } else { + if !isMeta { oitem := item.(*onedrive.Item) info := oitem.Info() - // Don't need to check SharePoint because it was added after we stopped - // adding meta files to backup details. if info.OneDrive != nil { displayName = oitem.Info().OneDrive.ItemName + // Don't need to check SharePoint because it was added after we stopped + // adding meta files to backup details. assert.False(t, oitem.Info().OneDrive.IsMeta, "meta marker for non meta item %s", name) } else if info.SharePoint != nil { displayName = oitem.Info().SharePoint.ItemName @@ -768,9 +724,13 @@ func compareOneDriveItem( } if isMeta { + var itemType *metadata.Item + + assert.IsType(t, itemType, item) + var ( - itemMeta onedrive.Metadata - expectedMeta onedrive.Metadata + itemMeta metadata.Metadata + expectedMeta metadata.Metadata ) err = json.Unmarshal(buf, &itemMeta) @@ -806,13 +766,13 @@ func compareOneDriveItem( assert.Equal(t, expectedMeta.FileName, itemMeta.FileName) } - if !restorePermissions { + if !config.Opts.RestorePermissions { assert.Equal(t, 0, len(itemMeta.Permissions)) return true } // We cannot restore owner permissions, so skip checking them - itemPerms := []onedrive.UserPermission{} + itemPerms := []metadata.Permission{} for _, p := range itemMeta.Permissions { if p.Roles[0] != "owner" { @@ -824,8 +784,11 @@ func compareOneDriveItem( t, expectedMeta.Permissions, itemPerms, - permissionEqual, - ) + // sharepoint retrieves a superset of permissions + // (all site admins, site groups, built in by default) + // relative to the permissions changed by the test. + config.Service == path.SharePointService, + permissionEqual) return true } @@ -866,7 +829,7 @@ func compareItem( service path.ServiceType, category path.CategoryType, item data.Stream, - restorePermissions bool, + config ConfigInfo, rootDir bool, ) bool { if mt, ok := item.(data.StreamModTime); ok { @@ -887,7 +850,7 @@ func compareItem( } case path.OneDriveService: - return compareOneDriveItem(t, expected, item, restorePermissions, rootDir) + return compareDriveItem(t, expected, item, config, rootDir) case path.SharePointService: if category != path.LibrariesCategory { @@ -895,7 +858,7 @@ func compareItem( } // SharePoint libraries reuses OneDrive code. - return compareOneDriveItem(t, expected, item, restorePermissions, rootDir) + return compareDriveItem(t, expected, item, config, rootDir) default: assert.FailNowf(t, "unexpected service: %s", service.String()) @@ -919,7 +882,36 @@ func checkHasCollections( } for _, g := range got { - gotNames = append(gotNames, g.FullPath().String()) + // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection + // interface. + if !assert.Implements(t, (*data.LocationPather)(nil), g) { + continue + } + + fp := g.FullPath() + loc := g.(data.LocationPather).LocationPath() + + if fp.Service() == path.OneDriveService || + (fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) { + dp, err := path.ToDrivePath(fp) + if !assert.NoError(t, err, clues.ToCore(err)) { + continue + } + + loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...) 
+ } + + p, err := loc.ToDataLayerPath( + fp.Tenant(), + fp.ResourceOwner(), + fp.Service(), + fp.Category(), + false) + if !assert.NoError(t, err, clues.ToCore(err)) { + continue + } + + gotNames = append(gotNames, p.String()) } assert.ElementsMatch(t, expectedNames, gotNames, "returned collections") @@ -931,8 +923,7 @@ func checkCollections( expectedItems int, expected map[string]map[string][]byte, got []data.BackupCollection, - dest control.RestoreDestination, - restorePermissions bool, + config ConfigInfo, ) int { collectionsWithItems := []data.BackupCollection{} @@ -946,7 +937,7 @@ func checkCollections( category = returned.FullPath().Category() expectedColData = expected[returned.FullPath().String()] folders = returned.FullPath().Elements() - rootDir = folders[len(folders)-1] == dest.ContainerName + rootDir = folders[len(folders)-1] == config.Dest.ContainerName ) // Need to iterate through all items even if we don't expect to find a match @@ -979,7 +970,7 @@ func checkCollections( service, category, item, - restorePermissions, + config, rootDir) { gotItems-- } @@ -1100,7 +1091,7 @@ func makeSharePointBackupSel( } // backupSelectorForExpected creates a selector that can be used to backup the -// given items in expected based on the item paths. Fails the test if items from +// given dests based on the item paths. Fails the test if items from // multiple services are in expected. func backupSelectorForExpected( t *testing.T, @@ -1127,127 +1118,6 @@ func backupSelectorForExpected( return selectors.Selector{} } -// backupOutputPathFromRestore returns a path.Path denoting the location in -// kopia the data will be placed at. The location is a data-type specific -// combination of the location the data was recently restored to and where the -// data was originally in the hierarchy. -func backupOutputPathFromRestore( - t *testing.T, - restoreDest control.RestoreDestination, - inputPath path.Path, -) path.Path { - base := []string{restoreDest.ContainerName} - - // OneDrive has leading information like the drive ID. - if inputPath.Service() == path.OneDriveService || inputPath.Service() == path.SharePointService { - folders := inputPath.Folders() - base = append(append([]string{}, folders[:3]...), restoreDest.ContainerName) - - if len(folders) > 3 { - base = append(base, folders[3:]...) - } - } - - if inputPath.Service() == path.ExchangeService && inputPath.Category() == path.EmailCategory { - base = append(base, inputPath.Folders()...) - } - - return mustToDataLayerPath( - t, - inputPath.Service(), - inputPath.Tenant(), - inputPath.ResourceOwner(), - inputPath.Category(), - base, - false, - ) -} - -// TODO(ashmrtn): Make this an actual mock class that can be used in other -// packages. 
-type mockRestoreCollection struct { - data.Collection - auxItems map[string]data.Stream -} - -func (rc mockRestoreCollection) Fetch( - ctx context.Context, - name string, -) (data.Stream, error) { - res := rc.auxItems[name] - if res == nil { - return nil, data.ErrNotFound - } - - return res, nil -} - -func collectionsForInfo( - t *testing.T, - service path.ServiceType, - tenant, user string, - dest control.RestoreDestination, - allInfo []colInfo, - backupVersion int, -) (int, int, []data.RestoreCollection, map[string]map[string][]byte) { - var ( - collections = make([]data.RestoreCollection, 0, len(allInfo)) - expectedData = make(map[string]map[string][]byte, len(allInfo)) - totalItems = 0 - kopiaEntries = 0 - ) - - for _, info := range allInfo { - pth := mustToDataLayerPath( - t, - service, - tenant, - user, - info.category, - info.pathElements, - false) - - mc := exchMock.NewCollection(pth, pth, len(info.items)) - baseDestPath := backupOutputPathFromRestore(t, dest, pth) - - baseExpected := expectedData[baseDestPath.String()] - if baseExpected == nil { - expectedData[baseDestPath.String()] = make(map[string][]byte, len(info.items)) - baseExpected = expectedData[baseDestPath.String()] - } - - for i := 0; i < len(info.items); i++ { - mc.Names[i] = info.items[i].name - mc.Data[i] = info.items[i].data - - baseExpected[info.items[i].lookupKey] = info.items[i].data - - // We do not count metadata files against item count - if backupVersion > 0 && - (service == path.OneDriveService || service == path.SharePointService) && - metadata.HasMetaSuffix(info.items[i].name) { - continue - } - - totalItems++ - } - - c := mockRestoreCollection{Collection: mc, auxItems: map[string]data.Stream{}} - - for _, aux := range info.auxItems { - c.auxItems[aux.name] = &exchMock.Data{ - ID: aux.name, - Reader: io.NopCloser(bytes.NewReader(aux.data)), - } - } - - collections = append(collections, c) - kopiaEntries += len(info.items) - } - - return totalItems, kopiaEntries, collections, expectedData -} - func getSelectorWith( t *testing.T, service path.ServiceType, @@ -1282,10 +1152,10 @@ func getSelectorWith( } } -func loadConnector(ctx context.Context, t *testing.T, r resource) *GraphConnector { +func loadConnector(ctx context.Context, t *testing.T, r Resource) *GraphConnector { a := tester.NewM365Account(t) - connector, err := NewGraphConnector(ctx, a, r, fault.New(true)) + connector, err := NewGraphConnector(ctx, a, r) require.NoError(t, err, clues.ToCore(err)) return connector diff --git a/src/internal/connector/graph_connector_onedrive_test.go b/src/internal/connector/graph_connector_onedrive_test.go index 495ddedc4..b3fe13e75 100644 --- a/src/internal/connector/graph_connector_onedrive_test.go +++ b/src/internal/connector/graph_connector_onedrive_test.go @@ -2,13 +2,11 @@ package connector import ( "context" - "encoding/json" "fmt" "strings" "testing" "github.com/alcionai/clues" - "github.com/google/uuid" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,7 +14,7 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive" + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/version" @@ -25,90 +23,11 @@ import ( 
"github.com/alcionai/corso/src/pkg/path" ) -// For any version post this(inclusive), we expect to be using IDs for -// permission instead of email -const versionPermissionSwitchedToID = version.OneDrive4DirIncludesPermissions - -func getMetadata(fileName string, perm permData, permUseID bool) onedrive.Metadata { - if len(perm.user) == 0 || len(perm.roles) == 0 || - perm.sharingMode != onedrive.SharingModeCustom { - return onedrive.Metadata{ - FileName: fileName, - SharingMode: perm.sharingMode, - } - } - - // In case of permissions, the id will usually be same for same - // user/role combo unless deleted and readded, but we have to do - // this as we only have two users of which one is already taken. - id := uuid.NewString() - uperm := onedrive.UserPermission{ID: id, Roles: perm.roles} - - if permUseID { - uperm.EntityID = perm.entityID - } else { - uperm.Email = perm.user - } - - testMeta := onedrive.Metadata{ - FileName: fileName, - Permissions: []onedrive.UserPermission{uperm}, - } - - return testMeta -} - -type testOneDriveData struct { - FileName string `json:"fileName,omitempty"` - Data []byte `json:"data,omitempty"` -} - -func onedriveItemWithData( - t *testing.T, - name, lookupKey string, - fileData []byte, -) itemInfo { - t.Helper() - - content := testOneDriveData{ - FileName: lookupKey, - Data: fileData, - } - - serialized, err := json.Marshal(content) - require.NoError(t, err, clues.ToCore(err)) - - return itemInfo{ - name: name, - data: serialized, - lookupKey: lookupKey, - } -} - -func onedriveMetadata( - t *testing.T, - fileName, itemID, lookupKey string, - perm permData, - permUseID bool, -) itemInfo { - t.Helper() - - testMeta := getMetadata(fileName, perm, permUseID) - - testMetaJSON, err := json.Marshal(testMeta) - require.NoError(t, err, "marshalling metadata", clues.ToCore(err)) - - return itemInfo{ - name: itemID, - data: testMetaJSON, - lookupKey: lookupKey, - } -} - var ( - fileName = "test-file.txt" - folderAName = "folder-a" - folderBName = "b" + fileName = "test-file.txt" + folderAName = "folder-a" + folderBName = "b" + folderNamedFolder = "folder" fileAData = []byte(strings.Repeat("a", 33)) fileBData = []byte(strings.Repeat("b", 65)) @@ -121,202 +40,6 @@ var ( readPerm = []string{"read"} ) -func newOneDriveCollection( - t *testing.T, - service path.ServiceType, - pathElements []string, - backupVersion int, -) *onedriveCollection { - return &onedriveCollection{ - service: service, - pathElements: pathElements, - backupVersion: backupVersion, - t: t, - } -} - -type onedriveCollection struct { - service path.ServiceType - pathElements []string - items []itemInfo - aux []itemInfo - backupVersion int - t *testing.T -} - -func (c onedriveCollection) collection() colInfo { - cat := path.FilesCategory - if c.service == path.SharePointService { - cat = path.LibrariesCategory - } - - return colInfo{ - pathElements: c.pathElements, - category: cat, - items: c.items, - auxItems: c.aux, - } -} - -func (c *onedriveCollection) withFile(name string, fileData []byte, perm permData) *onedriveCollection { - switch c.backupVersion { - case 0: - // Lookups will occur using the most recent version of things so we need - // the embedded file name to match that. 
- c.items = append(c.items, onedriveItemWithData( - c.t, - name, - name+metadata.DataFileSuffix, - fileData)) - - case version.OneDrive1DataAndMetaFiles, 2, version.OneDrive3IsMetaMarker, - version.OneDrive4DirIncludesPermissions, version.OneDrive5DirMetaNoName: - c.items = append(c.items, onedriveItemWithData( - c.t, - name+metadata.DataFileSuffix, - name+metadata.DataFileSuffix, - fileData)) - - md := onedriveMetadata( - c.t, - "", - name+metadata.MetaFileSuffix, - name+metadata.MetaFileSuffix, - perm, - c.backupVersion >= versionPermissionSwitchedToID) - c.items = append(c.items, md) - c.aux = append(c.aux, md) - - case version.OneDrive6NameInMeta, version.OneDrive7LocationRef: - c.items = append(c.items, onedriveItemWithData( - c.t, - name+metadata.DataFileSuffix, - name+metadata.DataFileSuffix, - fileData)) - - md := onedriveMetadata( - c.t, - name, - name+metadata.MetaFileSuffix, - name, - perm, - c.backupVersion >= versionPermissionSwitchedToID) - c.items = append(c.items, md) - c.aux = append(c.aux, md) - - default: - assert.FailNowf(c.t, "bad backup version", "version %d", c.backupVersion) - } - - return c -} - -func (c *onedriveCollection) withFolder(name string, perm permData) *onedriveCollection { - switch c.backupVersion { - case 0, version.OneDrive4DirIncludesPermissions, version.OneDrive5DirMetaNoName, - version.OneDrive6NameInMeta, version.OneDrive7LocationRef: - return c - - case version.OneDrive1DataAndMetaFiles, 2, version.OneDrive3IsMetaMarker: - c.items = append( - c.items, - onedriveMetadata( - c.t, - "", - name+metadata.DirMetaFileSuffix, - name+metadata.DirMetaFileSuffix, - perm, - c.backupVersion >= versionPermissionSwitchedToID)) - - default: - assert.FailNowf(c.t, "bad backup version", "version %d", c.backupVersion) - } - - return c -} - -// withPermissions adds permissions to the folder represented by this -// onedriveCollection. -func (c *onedriveCollection) withPermissions(perm permData) *onedriveCollection { - // These versions didn't store permissions for the folder or didn't store them - // in the folder's collection. - if c.backupVersion < version.OneDrive4DirIncludesPermissions { - return c - } - - name := c.pathElements[len(c.pathElements)-1] - metaName := name - - if c.backupVersion >= version.OneDrive5DirMetaNoName { - // We switched to just .dirmeta for metadata file names. 
- metaName = "" - } - - if name == "root:" { - return c - } - - md := onedriveMetadata( - c.t, - name, - metaName+metadata.DirMetaFileSuffix, - metaName+metadata.DirMetaFileSuffix, - perm, - c.backupVersion >= versionPermissionSwitchedToID) - - c.items = append(c.items, md) - c.aux = append(c.aux, md) - - return c -} - -type permData struct { - user string // user is only for older versions - entityID string - roles []string - sharingMode onedrive.SharingMode -} - -type itemData struct { - name string - data []byte - perms permData -} - -type onedriveColInfo struct { - pathElements []string - perms permData - files []itemData - folders []itemData -} - -func testDataForInfo( - t *testing.T, - service path.ServiceType, - cols []onedriveColInfo, - backupVersion int, -) []colInfo { - var res []colInfo - - for _, c := range cols { - onedriveCol := newOneDriveCollection(t, service, c.pathElements, backupVersion) - - for _, f := range c.files { - onedriveCol.withFile(f.name, f.data, f.perms) - } - - for _, d := range c.folders { - onedriveCol.withFolder(d.name, d.perms) - } - - onedriveCol.withPermissions(c.perms) - - res = append(res, onedriveCol.collection()) - } - - return res -} - func mustGetDefaultDriveID( t *testing.T, ctx context.Context, //revive:disable-line:context-as-argument @@ -331,9 +54,9 @@ func mustGetDefaultDriveID( switch backupService { case path.OneDriveService: - d, err = service.Client().UsersById(resourceOwner).Drive().Get(ctx, nil) + d, err = service.Client().Users().ByUserId(resourceOwner).Drive().Get(ctx, nil) case path.SharePointService: - d, err = service.Client().SitesById(resourceOwner).Drive().Get(ctx, nil) + d, err = service.Client().Sites().BySiteId(resourceOwner).Drive().Get(ctx, nil) default: assert.FailNowf(t, "unknown service type %s", backupService.String()) } @@ -364,7 +87,7 @@ type suiteInfo interface { // also be a site. BackupResourceOwner() string BackupService() path.ServiceType - Resource() resource + Resource() Resource } type oneDriveSuite interface { @@ -383,7 +106,7 @@ type suiteInfoImpl struct { tertiaryUserID string acct account.Account service path.ServiceType - resourceType resource + resourceType Resource } func (si suiteInfoImpl) Service() graph.Servicer { @@ -418,7 +141,7 @@ func (si suiteInfoImpl) BackupService() path.ServiceType { return si.service } -func (si suiteInfoImpl) Resource() resource { +func (si suiteInfoImpl) Resource() Resource { return si.resourceType } @@ -428,11 +151,6 @@ func (si suiteInfoImpl) Resource() resource { // SharePoint shares most of its libraries implementation with OneDrive so we // only test simple things here and leave the more extensive testing to // OneDrive. -// -// TODO(ashmrtn): SharePoint doesn't have permissions backup/restore enabled -// right now. Adjust the tests here when that is enabled so we have at least -// basic assurances that it's doing the right thing. We can leave the more -// extensive permissions tests to OneDrive as well. 
type GraphConnectorSharePointIntegrationSuite struct { tester.Suite @@ -483,6 +201,23 @@ func (suite *GraphConnectorSharePointIntegrationSuite) TestRestoreAndBackup_Mult testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(suite, version.Backup) } +func (suite *GraphConnectorSharePointIntegrationSuite) TestPermissionsRestoreAndBackup() { + testPermissionsRestoreAndBackup(suite, version.Backup) +} + +func (suite *GraphConnectorSharePointIntegrationSuite) TestPermissionsBackupAndNoRestore() { + testPermissionsBackupAndNoRestore(suite, version.Backup) +} + +func (suite *GraphConnectorSharePointIntegrationSuite) TestPermissionsInheritanceRestoreAndBackup() { + testPermissionsInheritanceRestoreAndBackup(suite, version.Backup) +} + +func (suite *GraphConnectorSharePointIntegrationSuite) TestRestoreFolderNamedFolderRegression() { + // No reason why it couldn't work with previous versions, but this is when it got introduced. + testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID) +} + // --------------------------------------------------------------------------- // OneDrive most recent backup version // --------------------------------------------------------------------------- @@ -542,6 +277,11 @@ func (suite *GraphConnectorOneDriveIntegrationSuite) TestPermissionsInheritanceR testPermissionsInheritanceRestoreAndBackup(suite, version.Backup) } +func (suite *GraphConnectorOneDriveIntegrationSuite) TestRestoreFolderNamedFolderRegression() { + // No reason why it couldn't work with previous versions, but this is when it got introduced. + testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID) +} + // --------------------------------------------------------------------------- // OneDrive regression // --------------------------------------------------------------------------- @@ -598,11 +338,15 @@ func (suite *GraphConnectorOneDriveNightlySuite) TestPermissionsBackupAndNoResto } func (suite *GraphConnectorOneDriveNightlySuite) TestPermissionsInheritanceRestoreAndBackup() { - // No reason why it couldn't work with previous versions, but this is when it - // got introduced. + // No reason why it couldn't work with previous versions, but this is when it got introduced. testPermissionsInheritanceRestoreAndBackup(suite, version.OneDrive4DirIncludesPermissions) } +func (suite *GraphConnectorOneDriveNightlySuite) TestRestoreFolderNamedFolderRegression() { + // No reason why it couldn't work with previous versions, but this is when it got introduced. 
+ testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID) +} + func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions( suite oneDriveSuite, startVersion int, @@ -616,114 +360,115 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions( ctx, suite.BackupService(), suite.Service(), - suite.BackupResourceOwner(), - ) + suite.BackupResourceOwner()) rootPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, } folderAPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderAName, } subfolderBPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderAName, folderBName, } subfolderAPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderAName, folderBName, folderAName, } folderBPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderBName, } - cols := []onedriveColInfo{ + cols := []OnedriveColInfo{ { - pathElements: rootPath, - files: []itemData{ + PathElements: rootPath, + Files: []ItemData{ { - name: fileName, - data: fileAData, + Name: fileName, + Data: fileAData, }, }, - folders: []itemData{ + Folders: []ItemData{ { - name: folderAName, + Name: folderAName, }, { - name: folderBName, + Name: folderBName, }, }, }, { - pathElements: folderAPath, - files: []itemData{ + PathElements: folderAPath, + Files: []ItemData{ { - name: fileName, - data: fileBData, + Name: fileName, + Data: fileBData, }, }, - folders: []itemData{ + Folders: []ItemData{ { - name: folderBName, + Name: folderBName, }, }, }, { - pathElements: subfolderBPath, - files: []itemData{ + PathElements: subfolderBPath, + Files: []ItemData{ { - name: fileName, - data: fileCData, + Name: fileName, + Data: fileCData, }, }, - folders: []itemData{ + Folders: []ItemData{ { - name: folderAName, + Name: folderAName, }, }, }, { - pathElements: subfolderAPath, - files: []itemData{ + PathElements: subfolderAPath, + Files: []ItemData{ { - name: fileName, - data: fileDData, + Name: fileName, + Data: fileDData, }, }, }, { - pathElements: folderBPath, - files: []itemData{ + PathElements: folderBPath, + Files: []ItemData{ { - name: fileName, - data: fileEData, + Name: fileName, + Data: fileEData, }, }, }, } - expected := testDataForInfo(suite.T(), suite.BackupService(), cols, version.Backup) + expected, err := DataForInfo(suite.BackupService(), cols, version.Backup) + require.NoError(suite.T(), err) for vn := startVersion; vn <= version.Backup; vn++ { suite.Run(fmt.Sprintf("Version%d", vn), func() { t := suite.T() - input := testDataForInfo(t, suite.BackupService(), cols, vn) + input, err := DataForInfo(suite.BackupService(), cols, vn) + require.NoError(suite.T(), err) testData := restoreBackupInfoMultiVersion{ service: suite.BackupService(), @@ -742,8 +487,7 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions( control.Options{ RestorePermissions: true, ToggleFeatures: control.Toggles{}, - }, - ) + }) }) } } @@ -760,109 +504,108 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) { ctx, suite.BackupService(), suite.Service(), - suite.BackupResourceOwner(), - ) + suite.BackupResourceOwner()) fileName2 := "test-file2.txt" folderCName := "folder-c" rootPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, } folderAPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + 
odConsts.RootPathDir, folderAName, } folderBPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderBName, } // For skipped test // subfolderAPath := []string{ - // "drives", + // odConsts.DrivesPathDir, // driveID, - // "root:", + // odConsts.RootPathDir, // folderBName, // folderAName, // } folderCPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderCName, } - cols := []onedriveColInfo{ + cols := []OnedriveColInfo{ { - pathElements: rootPath, - files: []itemData{ + PathElements: rootPath, + Files: []ItemData{ { // Test restoring a file that doesn't inherit permissions. - name: fileName, - data: fileAData, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: writePerm, + Name: fileName, + Data: fileAData, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, }, }, { // Test restoring a file that doesn't inherit permissions and has // no permissions. - name: fileName2, - data: fileBData, + Name: fileName2, + Data: fileBData, }, }, - folders: []itemData{ + Folders: []ItemData{ { - name: folderBName, + Name: folderBName, }, { - name: folderAName, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: readPerm, + Name: folderAName, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, }, }, { - name: folderCName, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: readPerm, + Name: folderCName, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, }, }, }, }, { - pathElements: folderBPath, - files: []itemData{ + PathElements: folderBPath, + Files: []ItemData{ { // Test restoring a file in a non-root folder that doesn't inherit // permissions. - name: fileName, - data: fileBData, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: writePerm, + Name: fileName, + Data: fileBData, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, }, }, }, - folders: []itemData{ + Folders: []ItemData{ { - name: folderAName, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: readPerm, + Name: folderAName, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, }, }, }, @@ -884,60 +627,63 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) { // }, // }, // }, - // perms: permData{ - // user: secondaryUserName, - // entityID: secondaryUserID, - // roles: readPerm, + // Perms: PermData{ + // User: secondaryUserName, + // EntityID: secondaryUserID, + // Roles: readPerm, // }, // }, { // Tests a folder that has permissions with an item in the folder with // the different permissions. - pathElements: folderAPath, - files: []itemData{ + PathElements: folderAPath, + Files: []ItemData{ { - name: fileName, - data: fileEData, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: writePerm, + Name: fileName, + Data: fileEData, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, }, }, }, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: readPerm, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, }, }, { // Tests a folder that has permissions with an item in the folder with // no permissions. 
- pathElements: folderCPath, - files: []itemData{ + PathElements: folderCPath, + Files: []ItemData{ { - name: fileName, - data: fileAData, + Name: fileName, + Data: fileAData, }, }, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: readPerm, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: readPerm, }, }, } - expected := testDataForInfo(suite.T(), suite.BackupService(), cols, version.Backup) + expected, err := DataForInfo(suite.BackupService(), cols, version.Backup) + require.NoError(suite.T(), err) + bss := suite.BackupService().String() for vn := startVersion; vn <= version.Backup; vn++ { - suite.Run(fmt.Sprintf("Version%d", vn), func() { + suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() { t := suite.T() // Ideally this can always be true or false and still // work, but limiting older versions to use emails so as // to validate that flow as well. - input := testDataForInfo(t, suite.BackupService(), cols, vn) + input, err := DataForInfo(suite.BackupService(), cols, vn) + require.NoError(suite.T(), err) testData := restoreBackupInfoMultiVersion{ service: suite.BackupService(), @@ -956,8 +702,7 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) { control.Options{ RestorePermissions: true, ToggleFeatures: control.Toggles{}, - }, - ) + }) }) } } @@ -974,53 +719,55 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) { ctx, suite.BackupService(), suite.Service(), - suite.BackupResourceOwner(), - ) + suite.BackupResourceOwner()) - inputCols := []onedriveColInfo{ + inputCols := []OnedriveColInfo{ { - pathElements: []string{ - "drives", + PathElements: []string{ + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, }, - files: []itemData{ + Files: []ItemData{ { - name: fileName, - data: fileAData, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: writePerm, + Name: fileName, + Data: fileAData, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, }, }, }, }, } - expectedCols := []onedriveColInfo{ + expectedCols := []OnedriveColInfo{ { - pathElements: []string{ - "drives", + PathElements: []string{ + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, }, - files: []itemData{ + Files: []ItemData{ { // No permissions on the output since they weren't restored. 
- name: fileName, - data: fileAData, + Name: fileName, + Data: fileAData, }, }, }, } - expected := testDataForInfo(suite.T(), suite.BackupService(), expectedCols, version.Backup) + expected, err := DataForInfo(suite.BackupService(), expectedCols, version.Backup) + require.NoError(suite.T(), err) + bss := suite.BackupService().String() for vn := startVersion; vn <= version.Backup; vn++ { - suite.Run(fmt.Sprintf("Version%d", vn), func() { + suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() { t := suite.T() - input := testDataForInfo(t, suite.BackupService(), inputCols, vn) + input, err := DataForInfo(suite.BackupService(), inputCols, vn) + require.NoError(suite.T(), err) testData := restoreBackupInfoMultiVersion{ service: suite.BackupService(), @@ -1039,8 +786,7 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) { control.Options{ RestorePermissions: false, ToggleFeatures: control.Toggles{}, - }, - ) + }) }) } } @@ -1060,69 +806,68 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio ctx, suite.BackupService(), suite.Service(), - suite.BackupResourceOwner(), - ) + suite.BackupResourceOwner()) folderAName := "custom" folderBName := "inherited" folderCName := "empty" rootPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, } folderAPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderAName, } subfolderAAPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderAName, folderAName, } subfolderABPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderAName, folderBName, } subfolderACPath := []string{ - "drives", + odConsts.DrivesPathDir, driveID, - "root:", + odConsts.RootPathDir, folderAName, folderCName, } - fileSet := []itemData{ + fileSet := []ItemData{ { - name: "file-custom", - data: fileAData, - perms: permData{ - user: secondaryUserName, - entityID: secondaryUserID, - roles: writePerm, - sharingMode: onedrive.SharingModeCustom, + Name: "file-custom", + Data: fileAData, + Perms: PermData{ + User: secondaryUserName, + EntityID: secondaryUserID, + Roles: writePerm, + SharingMode: metadata.SharingModeCustom, }, }, { - name: "file-inherited", - data: fileAData, - perms: permData{ - sharingMode: onedrive.SharingModeInherited, + Name: "file-inherited", + Data: fileAData, + Perms: PermData{ + SharingMode: metadata.SharingModeInherited, }, }, { - name: "file-empty", - data: fileAData, - perms: permData{ - sharingMode: onedrive.SharingModeCustom, + Name: "file-empty", + Data: fileAData, + Perms: PermData{ + SharingMode: metadata.SharingModeCustom, }, }, } @@ -1145,63 +890,66 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio // - inherted-permission-file // - empty-permission-file (empty/empty might have interesting behavior) - cols := []onedriveColInfo{ + cols := []OnedriveColInfo{ { - pathElements: rootPath, - files: []itemData{}, - folders: []itemData{ - {name: folderAName}, + PathElements: rootPath, + Files: []ItemData{}, + Folders: []ItemData{ + {Name: folderAName}, }, }, { - pathElements: folderAPath, - files: fileSet, - folders: []itemData{ - {name: folderAName}, - {name: folderBName}, - {name: folderCName}, + PathElements: folderAPath, + Files: fileSet, + Folders: []ItemData{ + {Name: folderAName}, + {Name: folderBName}, + {Name: folderCName}, }, - perms: permData{ - user: tertiaryUserName, - 
entityID: tertiaryUserID, - roles: readPerm, + Perms: PermData{ + User: tertiaryUserName, + EntityID: tertiaryUserID, + Roles: readPerm, }, }, { - pathElements: subfolderAAPath, - files: fileSet, - perms: permData{ - user: tertiaryUserName, - entityID: tertiaryUserID, - roles: writePerm, - sharingMode: onedrive.SharingModeCustom, + PathElements: subfolderAAPath, + Files: fileSet, + Perms: PermData{ + User: tertiaryUserName, + EntityID: tertiaryUserID, + Roles: writePerm, + SharingMode: metadata.SharingModeCustom, }, }, { - pathElements: subfolderABPath, - files: fileSet, - perms: permData{ - sharingMode: onedrive.SharingModeInherited, + PathElements: subfolderABPath, + Files: fileSet, + Perms: PermData{ + SharingMode: metadata.SharingModeInherited, }, }, { - pathElements: subfolderACPath, - files: fileSet, - perms: permData{ - sharingMode: onedrive.SharingModeCustom, + PathElements: subfolderACPath, + Files: fileSet, + Perms: PermData{ + SharingMode: metadata.SharingModeCustom, }, }, } - expected := testDataForInfo(suite.T(), suite.BackupService(), cols, version.Backup) + expected, err := DataForInfo(suite.BackupService(), cols, version.Backup) + require.NoError(suite.T(), err) + bss := suite.BackupService().String() for vn := startVersion; vn <= version.Backup; vn++ { - suite.Run(fmt.Sprintf("Version%d", vn), func() { + suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() { t := suite.T() // Ideally this can always be true or false and still // work, but limiting older versions to use emails so as // to validate that flow as well. - input := testDataForInfo(t, suite.BackupService(), cols, vn) + input, err := DataForInfo(suite.BackupService(), cols, vn) + require.NoError(suite.T(), err) testData := restoreBackupInfoMultiVersion{ service: suite.BackupService(), @@ -1212,6 +960,120 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio } runRestoreBackupTestVersions( + t, + suite.Account(), + testData, + suite.Tenant(), + []string{suite.BackupResourceOwner()}, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{}, + }) + }) + } +} + +func testRestoreFolderNamedFolderRegression( + suite oneDriveSuite, + startVersion int, +) { + ctx, flush := tester.NewContext() + defer flush() + + // Get the default drive ID for the test user. 
+ driveID := mustGetDefaultDriveID( + suite.T(), + ctx, + suite.BackupService(), + suite.Service(), + suite.BackupResourceOwner()) + + rootPath := []string{ + odConsts.DrivesPathDir, + driveID, + odConsts.RootPathDir, + } + folderFolderPath := []string{ + odConsts.DrivesPathDir, + driveID, + odConsts.RootPathDir, + folderNamedFolder, + } + subfolderPath := []string{ + odConsts.DrivesPathDir, + driveID, + odConsts.RootPathDir, + folderNamedFolder, + folderBName, + } + + cols := []OnedriveColInfo{ + { + PathElements: rootPath, + Files: []ItemData{ + { + Name: fileName, + Data: fileAData, + }, + }, + Folders: []ItemData{ + { + Name: folderNamedFolder, + }, + { + Name: folderBName, + }, + }, + }, + { + PathElements: folderFolderPath, + Files: []ItemData{ + { + Name: fileName, + Data: fileBData, + }, + }, + Folders: []ItemData{ + { + Name: folderBName, + }, + }, + }, + { + PathElements: subfolderPath, + Files: []ItemData{ + { + Name: fileName, + Data: fileCData, + }, + }, + Folders: []ItemData{ + { + Name: folderNamedFolder, + }, + }, + }, + } + + expected, err := DataForInfo(suite.BackupService(), cols, version.Backup) + require.NoError(suite.T(), err) + bss := suite.BackupService().String() + + for vn := startVersion; vn <= version.Backup; vn++ { + suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() { + t := suite.T() + input, err := DataForInfo(suite.BackupService(), cols, vn) + require.NoError(suite.T(), err) + + testData := restoreBackupInfoMultiVersion{ + service: suite.BackupService(), + resource: suite.Resource(), + backupVersion: vn, + collectionsPrevious: input, + collectionsLatest: expected, + } + + runRestoreTestWithVerion( t, suite.Account(), testData, diff --git a/src/internal/connector/graph_connector_onedrive_test_helper.go b/src/internal/connector/graph_connector_onedrive_test_helper.go new file mode 100644 index 000000000..38b760b1a --- /dev/null +++ b/src/internal/connector/graph_connector_onedrive_test_helper.go @@ -0,0 +1,358 @@ +package connector + +import ( + "encoding/json" + "fmt" + + "github.com/alcionai/clues" + "github.com/google/uuid" + "golang.org/x/exp/maps" + + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" + "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/pkg/path" +) + +// For any version post this(inclusive), we expect to be using IDs for +// permission instead of email +const versionPermissionSwitchedToID = version.OneDrive4DirIncludesPermissions + +func getMetadata(fileName string, perm PermData, permUseID bool) metadata.Metadata { + if len(perm.User) == 0 || len(perm.Roles) == 0 || + perm.SharingMode != metadata.SharingModeCustom { + return metadata.Metadata{ + FileName: fileName, + SharingMode: perm.SharingMode, + } + } + + // In case of permissions, the id will usually be same for same + // user/role combo unless deleted and readded, but we have to do + // this as we only have two users of which one is already taken. 
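	// Illustration (hypothetical values): with permUseID == true, a call like
	//   getMetadata("a.txt", PermData{User: "user@example.com", EntityID: "uid-1",
	//     Roles: []string{"write"}, SharingMode: metadata.SharingModeCustom}, true)
	// should yield Metadata{FileName: "a.txt"} with a single Permission holding a
	// fresh UUID, EntityID "uid-1", and Roles ["write"]; with permUseID == false
	// the Permission's Email is filled from PermData.User instead of EntityID.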
+ id := uuid.NewString() + uperm := metadata.Permission{ID: id, Roles: perm.Roles} + + if permUseID { + uperm.EntityID = perm.EntityID + } else { + uperm.Email = perm.User + } + + testMeta := metadata.Metadata{ + FileName: fileName, + Permissions: []metadata.Permission{uperm}, + } + + return testMeta +} + +type PermData struct { + User string // user is only for older versions + EntityID string + Roles []string + SharingMode metadata.SharingMode +} + +type ItemData struct { + Name string + Data []byte + Perms PermData +} + +type OnedriveColInfo struct { + PathElements []string + Perms PermData + Files []ItemData + Folders []ItemData +} + +type onedriveCollection struct { + service path.ServiceType + PathElements []string + items []ItemInfo + aux []ItemInfo + backupVersion int +} + +func (c onedriveCollection) collection() ColInfo { + cat := path.FilesCategory + if c.service == path.SharePointService { + cat = path.LibrariesCategory + } + + return ColInfo{ + PathElements: c.PathElements, + Category: cat, + Items: c.items, + AuxItems: c.aux, + } +} + +func NewOneDriveCollection( + service path.ServiceType, + PathElements []string, + backupVersion int, +) *onedriveCollection { + return &onedriveCollection{ + service: service, + PathElements: PathElements, + backupVersion: backupVersion, + } +} + +func DataForInfo( + service path.ServiceType, + cols []OnedriveColInfo, + backupVersion int, +) ([]ColInfo, error) { + var ( + res []ColInfo + err error + ) + + for _, c := range cols { + onedriveCol := NewOneDriveCollection(service, c.PathElements, backupVersion) + + for _, f := range c.Files { + _, err = onedriveCol.withFile(f.Name, f.Data, f.Perms) + if err != nil { + return res, err + } + } + + for _, d := range c.Folders { + _, err = onedriveCol.withFolder(d.Name, d.Perms) + if err != nil { + return res, err + } + } + + _, err = onedriveCol.withPermissions(c.Perms) + if err != nil { + return res, err + } + + res = append(res, onedriveCol.collection()) + } + + return res, nil +} + +func (c *onedriveCollection) withFile(name string, fileData []byte, perm PermData) (*onedriveCollection, error) { + switch c.backupVersion { + case 0: + // Lookups will occur using the most recent version of things so we need + // the embedded file name to match that. 
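	// Illustration (hypothetical file name): for backup version 0 a file named
	// "report.txt" keeps its bare name as the item name, while its lookupKey is
	// "report.txt" + metadata.DataFileSuffix, so comparisons line up with the
	// naming used by current backup versions.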
+ item, err := onedriveItemWithData( + name, + name+metadata.DataFileSuffix, + fileData) + if err != nil { + return c, err + } + + c.items = append(c.items, item) + + // v1-5, early metadata design + case version.OneDrive1DataAndMetaFiles, 2, version.OneDrive3IsMetaMarker, + version.OneDrive4DirIncludesPermissions, version.OneDrive5DirMetaNoName: + items, err := onedriveItemWithData( + name+metadata.DataFileSuffix, + name+metadata.DataFileSuffix, + fileData) + if err != nil { + return c, err + } + + c.items = append(c.items, items) + + md, err := onedriveMetadata( + "", + name+metadata.MetaFileSuffix, + name+metadata.MetaFileSuffix, + perm, + c.backupVersion >= versionPermissionSwitchedToID) + if err != nil { + return c, err + } + + c.items = append(c.items, md) + c.aux = append(c.aux, md) + + // v6+ current metadata design + case version.OneDrive6NameInMeta, version.OneDrive7LocationRef, version.All8MigrateUserPNToID: + item, err := onedriveItemWithData( + name+metadata.DataFileSuffix, + name+metadata.DataFileSuffix, + fileData) + if err != nil { + return c, err + } + + c.items = append(c.items, item) + + md, err := onedriveMetadata( + name, + name+metadata.MetaFileSuffix, + name, + perm, + c.backupVersion >= versionPermissionSwitchedToID) + if err != nil { + return c, err + } + + c.items = append(c.items, md) + c.aux = append(c.aux, md) + + default: + return c, clues.New(fmt.Sprintf("bad backup version. version %d", c.backupVersion)) + } + + return c, nil +} + +func (c *onedriveCollection) withFolder(name string, perm PermData) (*onedriveCollection, error) { + switch c.backupVersion { + case 0, version.OneDrive4DirIncludesPermissions, version.OneDrive5DirMetaNoName, + version.OneDrive6NameInMeta, version.OneDrive7LocationRef, version.All8MigrateUserPNToID: + return c, nil + + case version.OneDrive1DataAndMetaFiles, 2, version.OneDrive3IsMetaMarker: + item, err := onedriveMetadata( + "", + name+metadata.DirMetaFileSuffix, + name+metadata.DirMetaFileSuffix, + perm, + c.backupVersion >= versionPermissionSwitchedToID) + + c.items = append(c.items, item) + + if err != nil { + return c, err + } + + default: + return c, clues.New(fmt.Sprintf("bad backup version.version %d", c.backupVersion)) + } + + return c, nil +} + +// withPermissions adds permissions to the folder represented by this +// onedriveCollection. +func (c *onedriveCollection) withPermissions(perm PermData) (*onedriveCollection, error) { + // These versions didn't store permissions for the folder or didn't store them + // in the folder's collection. + if c.backupVersion < version.OneDrive4DirIncludesPermissions { + return c, nil + } + + name := c.PathElements[len(c.PathElements)-1] + metaName := name + + if c.backupVersion >= version.OneDrive5DirMetaNoName { + // We switched to just .dirmeta for metadata file names. 
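	// Illustration (hypothetical folder name): from version.OneDrive5DirMetaNoName
	// onward a folder "reports" stores its permissions under just
	// metadata.DirMetaFileSuffix (".dirmeta"), whereas
	// version.OneDrive4DirIncludesPermissions used "reports" + metadata.DirMetaFileSuffix.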
+ metaName = "" + } + + if name == odConsts.RootPathDir { + return c, nil + } + + md, err := onedriveMetadata( + name, + metaName+metadata.DirMetaFileSuffix, + metaName+metadata.DirMetaFileSuffix, + perm, + c.backupVersion >= versionPermissionSwitchedToID) + if err != nil { + return c, err + } + + c.items = append(c.items, md) + c.aux = append(c.aux, md) + + return c, err +} + +type testOneDriveData struct { + FileName string `json:"fileName,omitempty"` + Data []byte `json:"data,omitempty"` +} + +func onedriveItemWithData( + name, lookupKey string, + fileData []byte, +) (ItemInfo, error) { + content := testOneDriveData{ + FileName: lookupKey, + Data: fileData, + } + + serialized, err := json.Marshal(content) + if err != nil { + return ItemInfo{}, clues.Stack(err) + } + + return ItemInfo{ + name: name, + data: serialized, + lookupKey: lookupKey, + }, nil +} + +func onedriveMetadata( + fileName, itemID, lookupKey string, + perm PermData, + permUseID bool, +) (ItemInfo, error) { + testMeta := getMetadata(fileName, perm, permUseID) + + testMetaJSON, err := json.Marshal(testMeta) + if err != nil { + return ItemInfo{}, clues.Wrap(err, "marshalling metadata") + } + + return ItemInfo{ + name: itemID, + data: testMetaJSON, + lookupKey: lookupKey, + }, nil +} + +func GetCollectionsAndExpected( + config ConfigInfo, + testCollections []ColInfo, + backupVersion int, +) (int, int, []data.RestoreCollection, map[string]map[string][]byte, error) { + var ( + collections []data.RestoreCollection + expectedData = map[string]map[string][]byte{} + totalItems = 0 + totalKopiaItems = 0 + ) + + for _, owner := range config.ResourceOwners { + numItems, kopiaItems, ownerCollections, userExpectedData, err := collectionsForInfo( + config.Service, + config.Tenant, + owner, + config.Dest, + testCollections, + backupVersion, + ) + if err != nil { + return totalItems, totalKopiaItems, collections, expectedData, err + } + + collections = append(collections, ownerCollections...) 
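	// Per-owner counts are accumulated below and the per-owner expected-data maps
	// are merged with maps.Copy; the keys are full destination path strings that
	// include the resource owner, so collisions across owners aren't expected
	// (assumption based on collectionsForInfo and backupOutputPathFromRestore
	// elsewhere in this change).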
+ totalItems += numItems + totalKopiaItems += kopiaItems + + maps.Copy(expectedData, userExpectedData) + } + + return totalItems, totalKopiaItems, collections, expectedData, nil +} diff --git a/src/internal/connector/graph_connector_test.go b/src/internal/connector/graph_connector_test.go index 123f3f959..e8906d35b 100644 --- a/src/internal/connector/graph_connector_test.go +++ b/src/internal/connector/graph_connector_test.go @@ -11,9 +11,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "golang.org/x/exp/maps" - "github.com/alcionai/corso/src/internal/common" + inMock "github.com/alcionai/corso/src/internal/common/idname/mock" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/internal/connector/mock" "github.com/alcionai/corso/src/internal/connector/support" @@ -58,7 +57,7 @@ func (suite *GraphConnectorUnitSuite) TestPopulateOwnerIDAndNamesFrom() { table := []struct { name string owner string - ins common.IDsNames + ins inMock.Cache rc *resourceClient expectID string expectName string @@ -81,108 +80,81 @@ func (suite *GraphConnectorUnitSuite) TestPopulateOwnerIDAndNamesFrom() { expectErr: require.Error, }, { - name: "only id map with owner id", - owner: id, - ins: common.IDsNames{ - IDToName: itn, - NameToID: nil, - }, + name: "only id map with owner id", + owner: id, + ins: inMock.NewCache(itn, nil), rc: noLookup, expectID: id, expectName: name, expectErr: require.NoError, }, { - name: "only name map with owner id", - owner: id, - ins: common.IDsNames{ - IDToName: nil, - NameToID: nti, - }, + name: "only name map with owner id", + owner: id, + ins: inMock.NewCache(nil, nti), rc: noLookup, expectID: "", expectName: "", expectErr: require.Error, }, { - name: "only name map with owner id and lookup", - owner: id, - ins: common.IDsNames{ - IDToName: nil, - NameToID: nti, - }, + name: "only name map with owner id and lookup", + owner: id, + ins: inMock.NewCache(nil, nti), rc: lookup, expectID: id, expectName: name, expectErr: require.NoError, }, { - name: "only id map with owner name", - owner: name, - ins: common.IDsNames{ - IDToName: itn, - NameToID: nil, - }, + name: "only id map with owner name", + owner: name, + ins: inMock.NewCache(itn, nil), rc: lookup, expectID: id, expectName: name, expectErr: require.NoError, }, { - name: "only name map with owner name", - owner: name, - ins: common.IDsNames{ - IDToName: nil, - NameToID: nti, - }, + name: "only name map with owner name", + owner: name, + ins: inMock.NewCache(nil, nti), rc: noLookup, expectID: id, expectName: name, expectErr: require.NoError, }, { - name: "only id map with owner name", - owner: name, - ins: common.IDsNames{ - IDToName: itn, - NameToID: nil, - }, + name: "only id map with owner name", + owner: name, + ins: inMock.NewCache(itn, nil), rc: noLookup, expectID: "", expectName: "", expectErr: require.Error, }, { - name: "only id map with owner name and lookup", - owner: name, - ins: common.IDsNames{ - IDToName: itn, - NameToID: nil, - }, + name: "only id map with owner name and lookup", + owner: name, + ins: inMock.NewCache(itn, nil), rc: lookup, expectID: id, expectName: name, expectErr: require.NoError, }, { - name: "both maps with owner id", - owner: id, - ins: common.IDsNames{ - IDToName: itn, - NameToID: nti, - }, + name: "both maps with owner id", + owner: id, + ins: inMock.NewCache(itn, nti), rc: noLookup, expectID: id, expectName: name, expectErr: require.NoError, }, { - name: "both 
maps with owner name", - owner: name, - ins: common.IDsNames{ - IDToName: itn, - NameToID: nti, - }, + name: "both maps with owner name", + owner: name, + ins: inMock.NewCache(itn, nti), rc: noLookup, expectID: id, expectName: name, @@ -191,10 +163,9 @@ func (suite *GraphConnectorUnitSuite) TestPopulateOwnerIDAndNamesFrom() { { name: "non-matching maps with owner id", owner: id, - ins: common.IDsNames{ - IDToName: map[string]string{"foo": "bar"}, - NameToID: map[string]string{"fnords": "smarf"}, - }, + ins: inMock.NewCache( + map[string]string{"foo": "bar"}, + map[string]string{"fnords": "smarf"}), rc: noLookup, expectID: "", expectName: "", @@ -203,10 +174,9 @@ func (suite *GraphConnectorUnitSuite) TestPopulateOwnerIDAndNamesFrom() { { name: "non-matching with owner name", owner: name, - ins: common.IDsNames{ - IDToName: map[string]string{"foo": "bar"}, - NameToID: map[string]string{"fnords": "smarf"}, - }, + ins: inMock.NewCache( + map[string]string{"foo": "bar"}, + map[string]string{"fnords": "smarf"}), rc: noLookup, expectID: "", expectName: "", @@ -215,10 +185,9 @@ func (suite *GraphConnectorUnitSuite) TestPopulateOwnerIDAndNamesFrom() { { name: "non-matching maps with owner id and lookup", owner: id, - ins: common.IDsNames{ - IDToName: map[string]string{"foo": "bar"}, - NameToID: map[string]string{"fnords": "smarf"}, - }, + ins: inMock.NewCache( + map[string]string{"foo": "bar"}, + map[string]string{"fnords": "smarf"}), rc: lookup, expectID: id, expectName: name, @@ -227,10 +196,9 @@ func (suite *GraphConnectorUnitSuite) TestPopulateOwnerIDAndNamesFrom() { { name: "non-matching with owner name and lookup", owner: name, - ins: common.IDsNames{ - IDToName: map[string]string{"foo": "bar"}, - NameToID: map[string]string{"fnords": "smarf"}, - }, + ins: inMock.NewCache( + map[string]string{"foo": "bar"}, + map[string]string{"fnords": "smarf"}), rc: lookup, expectID: id, expectName: name, @@ -326,7 +294,7 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() { var ( t = suite.T() acct = tester.NewM365Account(t) - dest = tester.DefaultTestRestoreDestination() + dest = tester.DefaultTestRestoreDestination("") sel = selectors.Selector{ Service: selectors.ServiceUnknown, } @@ -354,7 +322,7 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() { } func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { - dest := tester.DefaultTestRestoreDestination() + dest := tester.DefaultTestRestoreDestination("") table := []struct { name string col []data.RestoreCollection @@ -438,66 +406,30 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { // Exchange Functions //------------------------------------------------------------- -func getCollectionsAndExpected( - t *testing.T, - config configInfo, - testCollections []colInfo, - backupVersion int, -) (int, int, []data.RestoreCollection, map[string]map[string][]byte) { - t.Helper() - - var ( - collections []data.RestoreCollection - expectedData = map[string]map[string][]byte{} - totalItems = 0 - totalKopiaItems = 0 - ) - - for _, owner := range config.resourceOwners { - numItems, kopiaItems, ownerCollections, userExpectedData := collectionsForInfo( - t, - config.service, - config.tenant, - owner, - config.dest, - testCollections, - backupVersion, - ) - - collections = append(collections, ownerCollections...) 
- totalItems += numItems - totalKopiaItems += kopiaItems - - maps.Copy(expectedData, userExpectedData) - } - - return totalItems, totalKopiaItems, collections, expectedData -} - func runRestore( t *testing.T, ctx context.Context, //revive:disable-line:context-as-argument - config configInfo, + config ConfigInfo, backupVersion int, collections []data.RestoreCollection, numRestoreItems int, ) { t.Logf( "Restoring collections to %s for resourceOwners(s) %v\n", - config.dest.ContainerName, - config.resourceOwners) + config.Dest.ContainerName, + config.ResourceOwners) start := time.Now() - restoreGC := loadConnector(ctx, t, config.resource) - restoreSel := getSelectorWith(t, config.service, config.resourceOwners, true) + restoreGC := loadConnector(ctx, t, config.Resource) + restoreSel := getSelectorWith(t, config.Service, config.ResourceOwners, true) deets, err := restoreGC.ConsumeRestoreCollections( ctx, backupVersion, - config.acct, + config.Acct, restoreSel, - config.dest, - config.opts, + config.Dest, + config.Opts, collections, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -521,30 +453,30 @@ func runRestore( func runBackupAndCompare( t *testing.T, ctx context.Context, //revive:disable-line:context-as-argument - config configInfo, + config ConfigInfo, expectedData map[string]map[string][]byte, totalItems int, totalKopiaItems int, - inputCollections []colInfo, + inputCollections []ColInfo, ) { t.Helper() // Run a backup and compare its output with what we put in. cats := make(map[path.CategoryType]struct{}, len(inputCollections)) for _, c := range inputCollections { - cats[c.category] = struct{}{} + cats[c.Category] = struct{}{} } var ( - expectedDests = make([]destAndCats, 0, len(config.resourceOwners)) + expectedDests = make([]destAndCats, 0, len(config.ResourceOwners)) idToName = map[string]string{} nameToID = map[string]string{} ) - for _, ro := range config.resourceOwners { + for _, ro := range config.ResourceOwners { expectedDests = append(expectedDests, destAndCats{ resourceOwner: ro, - dest: config.dest.ContainerName, + dest: config.Dest.ContainerName, cats: cats, }) @@ -552,10 +484,10 @@ func runBackupAndCompare( nameToID[ro] = ro } - backupGC := loadConnector(ctx, t, config.resource) - backupGC.IDNameLookup = common.IDsNames{IDToName: idToName, NameToID: nameToID} + backupGC := loadConnector(ctx, t, config.Resource) + backupGC.IDNameLookup = inMock.NewCache(idToName, nameToID) - backupSel := backupSelectorForExpected(t, config.service, expectedDests) + backupSel := backupSelectorForExpected(t, config.Service, expectedDests) t.Logf("Selective backup of %s\n", backupSel) start := time.Now() @@ -564,11 +496,12 @@ func runBackupAndCompare( backupSel, backupSel, nil, - config.opts, + version.NoBackup, + config.Opts, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) // No excludes yet because this isn't an incremental backup. 
- assert.Empty(t, excludes) + assert.True(t, excludes.Empty()) t.Logf("Backup enumeration complete in %v\n", time.Since(start)) @@ -580,8 +513,7 @@ func runBackupAndCompare( totalKopiaItems, expectedData, dcs, - config.dest, - config.opts.RestorePermissions) + config) status := backupGC.Wait() @@ -602,22 +534,23 @@ func runRestoreBackupTest( ctx, flush := tester.NewContext() defer flush() - config := configInfo{ - acct: acct, - opts: opts, - resource: test.resource, - service: test.service, - tenant: tenant, - resourceOwners: resourceOwners, - dest: tester.DefaultTestRestoreDestination(), + config := ConfigInfo{ + Acct: acct, + Opts: opts, + Resource: test.resource, + Service: test.service, + Tenant: tenant, + ResourceOwners: resourceOwners, + Dest: tester.DefaultTestRestoreDestination(""), } - totalItems, totalKopiaItems, collections, expectedData := getCollectionsAndExpected( - t, + totalItems, totalKopiaItems, collections, expectedData, err := GetCollectionsAndExpected( config, test.collections, version.Backup) + require.NoError(t, err) + runRestore( t, ctx, @@ -636,6 +569,43 @@ func runRestoreBackupTest( test.collections) } +// runRestoreTest restores with data using the test's backup version +func runRestoreTestWithVerion( + t *testing.T, + acct account.Account, + test restoreBackupInfoMultiVersion, + tenant string, + resourceOwners []string, + opts control.Options, +) { + ctx, flush := tester.NewContext() + defer flush() + + config := ConfigInfo{ + Acct: acct, + Opts: opts, + Resource: test.resource, + Service: test.service, + Tenant: tenant, + ResourceOwners: resourceOwners, + Dest: tester.DefaultTestRestoreDestination(""), + } + + totalItems, _, collections, _, err := GetCollectionsAndExpected( + config, + test.collectionsPrevious, + test.backupVersion) + require.NoError(t, err) + + runRestore( + t, + ctx, + config, + test.backupVersion, + collections, + totalItems) +} + // runRestoreBackupTestVersions restores with data from an older // version of the backup and check the restored data against the // something that would be in the form of a newer backup. @@ -650,21 +620,21 @@ func runRestoreBackupTestVersions( ctx, flush := tester.NewContext() defer flush() - config := configInfo{ - acct: acct, - opts: opts, - resource: test.resource, - service: test.service, - tenant: tenant, - resourceOwners: resourceOwners, - dest: tester.DefaultTestRestoreDestination(), + config := ConfigInfo{ + Acct: acct, + Opts: opts, + Resource: test.resource, + Service: test.service, + Tenant: tenant, + ResourceOwners: resourceOwners, + Dest: tester.DefaultTestRestoreDestination(""), } - totalItems, _, collections, _ := getCollectionsAndExpected( - t, + totalItems, _, collections, _, err := GetCollectionsAndExpected( config, test.collectionsPrevious, test.backupVersion) + require.NoError(t, err) runRestore( t, @@ -675,11 +645,11 @@ func runRestoreBackupTestVersions( totalItems) // Get expected output for new version. 
- totalItems, totalKopiaItems, _, expectedData := getCollectionsAndExpected( - t, + totalItems, totalKopiaItems, _, expectedData, err := GetCollectionsAndExpected( config, test.collectionsLatest, version.Backup) + require.NoError(t, err) runBackupAndCompare( t, @@ -700,11 +670,11 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { name: "EmailsWithAttachments", service: path.ExchangeService, resource: Users, - collections: []colInfo{ + collections: []ColInfo{ { - pathElements: []string{"Inbox"}, - category: path.EmailCategory, - items: []itemInfo{ + PathElements: []string{"Inbox"}, + Category: path.EmailCategory, + Items: []ItemInfo{ { name: "someencodeditemID", data: exchMock.MessageWithDirectAttachment( @@ -727,11 +697,11 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { name: "MultipleEmailsMultipleFolders", service: path.ExchangeService, resource: Users, - collections: []colInfo{ + collections: []ColInfo{ { - pathElements: []string{"Inbox"}, - category: path.EmailCategory, - items: []itemInfo{ + PathElements: []string{"Inbox"}, + Category: path.EmailCategory, + Items: []ItemInfo{ { name: "someencodeditemID", data: exchMock.MessageWithBodyBytes( @@ -744,9 +714,9 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { }, }, { - pathElements: []string{"Work"}, - category: path.EmailCategory, - items: []itemInfo{ + PathElements: []string{"Work"}, + Category: path.EmailCategory, + Items: []ItemInfo{ { name: "someencodeditemID2", data: exchMock.MessageWithBodyBytes( @@ -768,9 +738,9 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { }, }, { - pathElements: []string{"Work", "Inbox"}, - category: path.EmailCategory, - items: []itemInfo{ + PathElements: []string{"Work", "Inbox"}, + Category: path.EmailCategory, + Items: []ItemInfo{ { name: "someencodeditemID4", data: exchMock.MessageWithBodyBytes( @@ -783,9 +753,9 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { }, }, { - pathElements: []string{"Work", "Inbox", "Work"}, - category: path.EmailCategory, - items: []itemInfo{ + PathElements: []string{"Work", "Inbox", "Work"}, + Category: path.EmailCategory, + Items: []ItemInfo{ { name: "someencodeditemID5", data: exchMock.MessageWithBodyBytes( @@ -803,11 +773,11 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { name: "MultipleContactsSingleFolder", service: path.ExchangeService, resource: Users, - collections: []colInfo{ + collections: []ColInfo{ { - pathElements: []string{"Contacts"}, - category: path.ContactsCategory, - items: []itemInfo{ + PathElements: []string{"Contacts"}, + Category: path.ContactsCategory, + Items: []ItemInfo{ { name: "someencodeditemID", data: exchMock.ContactBytes("Ghimley"), @@ -831,11 +801,11 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { name: "MultipleContactsMultipleFolders", service: path.ExchangeService, resource: Users, - collections: []colInfo{ + collections: []ColInfo{ { - pathElements: []string{"Work"}, - category: path.ContactsCategory, - items: []itemInfo{ + PathElements: []string{"Work"}, + Category: path.ContactsCategory, + Items: []ItemInfo{ { name: "someencodeditemID", data: exchMock.ContactBytes("Ghimley"), @@ -854,9 +824,9 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { }, }, { - pathElements: []string{"Personal"}, - category: path.ContactsCategory, - items: []itemInfo{ + PathElements: []string{"Personal"}, + Category: path.ContactsCategory, + Items: []ItemInfo{ { name: 
"someencodeditemID4", data: exchMock.ContactBytes("Argon"), @@ -966,11 +936,11 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames name: "Contacts", service: path.ExchangeService, resource: Users, - collections: []colInfo{ + collections: []ColInfo{ { - pathElements: []string{"Work"}, - category: path.ContactsCategory, - items: []itemInfo{ + PathElements: []string{"Work"}, + Category: path.ContactsCategory, + Items: []ItemInfo{ { name: "someencodeditemID", data: exchMock.ContactBytes("Ghimley"), @@ -979,9 +949,9 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames }, }, { - pathElements: []string{"Personal"}, - category: path.ContactsCategory, - items: []itemInfo{ + PathElements: []string{"Personal"}, + Category: path.ContactsCategory, + Items: []ItemInfo{ { name: "someencodeditemID2", data: exchMock.ContactBytes("Irgot"), @@ -1007,9 +977,9 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames // }, // }, // { - // pathElements: []string{"Personal"}, - // category: path.EventsCategory, - // items: []itemInfo{ + // PathElements: []string{"Personal"}, + // Category: path.EventsCategory, + // Items: []ItemInfo{ // { // name: "someencodeditemID2", // data: exchMock.EventWithSubjectBytes("Irgot"), @@ -1035,24 +1005,25 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames for i, collection := range test.collections { // Get a dest per collection so they're independent. - dest := tester.DefaultTestRestoreDestination() + dest := tester.DefaultTestRestoreDestination("") expectedDests = append(expectedDests, destAndCats{ resourceOwner: suite.user, dest: dest.ContainerName, cats: map[path.CategoryType]struct{}{ - collection.category: {}, + collection.Category: {}, }, }) - totalItems, _, collections, expectedData := collectionsForInfo( - t, + totalItems, _, collections, expectedData, err := collectionsForInfo( test.service, suite.connector.tenant, suite.user, dest, - []colInfo{collection}, + []ColInfo{collection}, version.Backup, ) + require.NoError(t, err) + allItems += totalItems for k, v := range expectedData { @@ -1106,6 +1077,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames backupSel, backupSel, nil, + version.NoBackup, control.Options{ RestorePermissions: true, ToggleFeatures: control.Toggles{}, @@ -1113,21 +1085,19 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames fault.New(true)) require.NoError(t, err, clues.ToCore(err)) // No excludes yet because this isn't an incremental backup. - assert.Empty(t, excludes) + assert.True(t, excludes.Empty()) t.Log("Backup enumeration complete") + ci := ConfigInfo{ + Opts: control.Options{RestorePermissions: true}, + // Alright to be empty, needed for OneDrive. + Dest: control.RestoreDestination{}, + } + // Pull the data prior to waiting for the status as otherwise it will // deadlock. - skipped := checkCollections( - t, - ctx, - allItems, - allExpectedData, - dcs, - // Alright to be empty, needed for OneDrive. 
- control.RestoreDestination{}, - true) + skipped := checkCollections(t, ctx, allItems, allExpectedData, dcs, ci) status := backupGC.Wait() assert.Equal(t, allItems+skipped, status.Objects, "status.Objects") @@ -1145,11 +1115,11 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup_largeMailAttac name: "EmailsWithLargeAttachments", service: path.ExchangeService, resource: Users, - collections: []colInfo{ + collections: []ColInfo{ { - pathElements: []string{"Inbox"}, - category: path.EmailCategory, - items: []itemInfo{ + PathElements: []string{"Inbox"}, + Category: path.EmailCategory, + Items: []ItemInfo{ { name: "35mbAttachment", data: exchMock.MessageWithSizedAttachment(subjectText, 35), @@ -1176,7 +1146,7 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup_largeMailAttac func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections() { table := []struct { name string - resource resource + resource Resource selectorFunc func(t *testing.T) selectors.Selector service path.ServiceType categories []string @@ -1206,9 +1176,7 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections resource: Users, selectorFunc: func(t *testing.T) selectors.Selector { sel := selectors.NewOneDriveBackup([]string{suite.user}) - sel.Include( - sel.Folders([]string{selectors.NoneTgt}), - ) + sel.Include(sel.Folders([]string{selectors.NoneTgt})) return sel.Selector }, @@ -1261,9 +1229,10 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections dcs, excludes, err := backupGC.ProduceBackupCollections( ctx, - backupSel, + inMock.NewProvider(id, name), backupSel, nil, + version.NoBackup, control.Options{ RestorePermissions: false, ToggleFeatures: control.Toggles{}, @@ -1271,7 +1240,7 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections fault.New(true)) require.NoError(t, err) // No excludes yet because this isn't an incremental backup. - assert.Empty(t, excludes) + assert.True(t, excludes.Empty()) t.Logf("Backup enumeration complete in %v\n", time.Since(start)) diff --git a/src/internal/connector/graph_connector_test_helper.go b/src/internal/connector/graph_connector_test_helper.go new file mode 100644 index 000000000..5d91538bb --- /dev/null +++ b/src/internal/connector/graph_connector_test_helper.go @@ -0,0 +1,188 @@ +package connector + +import ( + "bytes" + "context" + "io" + + exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" + "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/path" +) + +type ColInfo struct { + // Elements (in order) for the path representing this collection. Should + // only contain elements after the prefix that corso uses for the path. For + // example, a collection for the Inbox folder in exchange mail would just be + // "Inbox". + PathElements []string + Category path.CategoryType + Items []ItemInfo + // auxItems are items that can be retrieved with Fetch but won't be returned + // by Items(). These files do not directly participate in comparisosn at the + // end of a test. + AuxItems []ItemInfo +} + +type ItemInfo struct { + // lookupKey is a string that can be used to find this data from a set of + // other data in the same collection. 
This key should be something that will + // be the same before and after restoring the item in M365 and may not be + // the M365 ID. When restoring items out of place, the item is assigned a + // new ID making it unsuitable for a lookup key. + lookupKey string + name string + data []byte +} + +type ConfigInfo struct { + Acct account.Account + Opts control.Options + Resource Resource + Service path.ServiceType + Tenant string + ResourceOwners []string + Dest control.RestoreDestination +} + +func mustToDataLayerPath( + service path.ServiceType, + tenant, resourceOwner string, + category path.CategoryType, + elements []string, + isItem bool, +) (path.Path, error) { + res, err := path.Build(tenant, resourceOwner, service, category, isItem, elements...) + if err != nil { + return nil, err + } + + return res, err +} + +// backupOutputPathFromRestore returns a path.Path denoting the location in +// kopia the data will be placed at. The location is a data-type specific +// combination of the location the data was recently restored to and where the +// data was originally in the hierarchy. +func backupOutputPathFromRestore( + restoreDest control.RestoreDestination, + inputPath path.Path, +) (path.Path, error) { + base := []string{restoreDest.ContainerName} + + // OneDrive has leading information like the drive ID. + if inputPath.Service() == path.OneDriveService || inputPath.Service() == path.SharePointService { + folders := inputPath.Folders() + base = append(append([]string{}, folders[:3]...), restoreDest.ContainerName) + + if len(folders) > 3 { + base = append(base, folders[3:]...) + } + } + + if inputPath.Service() == path.ExchangeService && inputPath.Category() == path.EmailCategory { + base = append(base, inputPath.Folders()...) + } + + return mustToDataLayerPath( + inputPath.Service(), + inputPath.Tenant(), + inputPath.ResourceOwner(), + inputPath.Category(), + base, + false, + ) +} + +// TODO(ashmrtn): Make this an actual mock class that can be used in other +// packages. 
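+// mockRestoreCollection layers a Fetch method over a data.Collection so tests
+// can serve auxiliary items by name. Items registered here are reachable only
+// through Fetch and never show up in the wrapped collection's Items() stream,
+// matching the AuxItems semantics described on ColInfo.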
+type mockRestoreCollection struct { + data.Collection + auxItems map[string]data.Stream +} + +func (rc mockRestoreCollection) Fetch( + ctx context.Context, + name string, +) (data.Stream, error) { + res := rc.auxItems[name] + if res == nil { + return nil, data.ErrNotFound + } + + return res, nil +} + +func collectionsForInfo( + service path.ServiceType, + tenant, user string, + dest control.RestoreDestination, + allInfo []ColInfo, + backupVersion int, +) (int, int, []data.RestoreCollection, map[string]map[string][]byte, error) { + var ( + collections = make([]data.RestoreCollection, 0, len(allInfo)) + expectedData = make(map[string]map[string][]byte, len(allInfo)) + totalItems = 0 + kopiaEntries = 0 + ) + + for _, info := range allInfo { + pth, err := mustToDataLayerPath( + service, + tenant, + user, + info.Category, + info.PathElements, + false) + if err != nil { + return totalItems, kopiaEntries, collections, expectedData, err + } + + mc := exchMock.NewCollection(pth, pth, len(info.Items)) + + baseDestPath, err := backupOutputPathFromRestore(dest, pth) + if err != nil { + return totalItems, kopiaEntries, collections, expectedData, err + } + + baseExpected := expectedData[baseDestPath.String()] + if baseExpected == nil { + expectedData[baseDestPath.String()] = make(map[string][]byte, len(info.Items)) + baseExpected = expectedData[baseDestPath.String()] + } + + for i := 0; i < len(info.Items); i++ { + mc.Names[i] = info.Items[i].name + mc.Data[i] = info.Items[i].data + + baseExpected[info.Items[i].lookupKey] = info.Items[i].data + + // We do not count metadata files against item count + if backupVersion > 0 && + (service == path.OneDriveService || service == path.SharePointService) && + metadata.HasMetaSuffix(info.Items[i].name) { + continue + } + + totalItems++ + } + + c := mockRestoreCollection{Collection: mc, auxItems: map[string]data.Stream{}} + + for _, aux := range info.AuxItems { + c.auxItems[aux.name] = &exchMock.Data{ + ID: aux.name, + Reader: io.NopCloser(bytes.NewReader(aux.data)), + } + } + + collections = append(collections, c) + kopiaEntries += len(info.Items) + } + + return totalItems, kopiaEntries, collections, expectedData, nil +} diff --git a/src/internal/connector/mock/connector.go b/src/internal/connector/mock/connector.go index d6d68f067..d8cce9781 100644 --- a/src/internal/connector/mock/connector.go +++ b/src/internal/connector/mock/connector.go @@ -3,8 +3,10 @@ package mock import ( "context" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" @@ -12,9 +14,11 @@ import ( "github.com/alcionai/corso/src/pkg/selectors" ) +var _ inject.BackupProducer = &GraphConnector{} + type GraphConnector struct { Collections []data.BackupCollection - Exclude map[string]map[string]struct{} + Exclude *prefixmatcher.StringSetMatcher Deets *details.Details @@ -25,14 +29,15 @@ type GraphConnector struct { func (gc GraphConnector) ProduceBackupCollections( _ context.Context, - _ common.IDNamer, + _ idname.Provider, _ selectors.Selector, _ []data.RestoreCollection, + _ int, _ control.Options, _ *fault.Bus, ) ( []data.BackupCollection, - map[string]map[string]struct{}, + prefixmatcher.StringSetReader, error, ) { return 
gc.Collections, gc.Exclude, gc.Err diff --git a/src/internal/connector/onedrive/collection.go b/src/internal/connector/onedrive/collection.go index 39624f3a6..6f7cf3da8 100644 --- a/src/internal/connector/onedrive/collection.go +++ b/src/internal/connector/onedrive/collection.go @@ -15,7 +15,6 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -25,20 +24,10 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) const ( - // TODO: This number needs to be tuned - // Consider max open file limit `ulimit -n`, usually 1024 when setting this value - collectionChannelBufferSize = 5 - - // TODO: Tune this later along with collectionChannelBufferSize - urlPrefetchChannelBufferSize = 5 - - // maxDownloadRetires specifies the number of times a file download should - // be retried - maxDownloadRetires = 3 - // Used to compare in case of OneNote files MaxOneNoteFileSize = 2 * 1024 * 1024 * 1024 ) @@ -48,21 +37,14 @@ var ( _ data.Stream = &Item{} _ data.StreamInfo = &Item{} _ data.StreamModTime = &Item{} - _ data.Stream = &MetadataItem{} - _ data.StreamModTime = &MetadataItem{} -) - -type SharingMode int - -const ( - SharingModeCustom = SharingMode(iota) - SharingModeInherited + _ data.Stream = &metadata.Item{} + _ data.StreamModTime = &metadata.Item{} ) // Collection represents a set of OneDrive objects retrieved from M365 type Collection struct { // configured to handle large item downloads - itemClient *http.Client + itemClient graph.Requester // data is used to share data streams with the collection consumer data chan data.Stream @@ -110,7 +92,7 @@ type Collection struct { doNotMergeItems bool } -// itemGetterFunc gets an specified item +// itemGetterFunc gets a specified item type itemGetterFunc func( ctx context.Context, srv graph.Servicer, @@ -120,7 +102,7 @@ type itemGetterFunc func( // itemReadFunc returns a reader for the specified item type itemReaderFunc func( ctx context.Context, - hc *http.Client, + client graph.Requester, item models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) @@ -138,18 +120,18 @@ func pathToLocation(p path.Path) (*path.Builder, error) { return nil, nil } - odp, err := path.ToOneDrivePath(p) + dp, err := path.ToDrivePath(p) if err != nil { return nil, err } - return path.Builder{}.Append(odp.Root).Append(odp.Folders...), nil + return path.Builder{}.Append(dp.Root).Append(dp.Folders...), nil } // NewCollection creates a Collection func NewCollection( - itemClient *http.Client, - folderPath path.Path, + itemClient graph.Requester, + currPath path.Path, prevPath path.Path, driveID string, service graph.Servicer, @@ -163,9 +145,9 @@ func NewCollection( // to be changed as we won't be able to extract path information from the // storage path. In that case, we'll need to start storing the location paths // like we do the previous path. 
- locPath, err := pathToLocation(folderPath) + locPath, err := pathToLocation(currPath) if err != nil { - return nil, clues.Wrap(err, "getting location").With("folder_path", folderPath.String()) + return nil, clues.Wrap(err, "getting location").With("curr_path", currPath.String()) } prevLocPath, err := pathToLocation(prevPath) @@ -173,20 +155,49 @@ func NewCollection( return nil, clues.Wrap(err, "getting previous location").With("prev_path", prevPath.String()) } + c := newColl( + itemClient, + currPath, + prevPath, + driveID, + service, + statusUpdater, + source, + ctrlOpts, + colScope, + doNotMergeItems) + + c.locPath = locPath + c.prevLocPath = prevLocPath + + return c, nil +} + +func newColl( + gr graph.Requester, + currPath path.Path, + prevPath path.Path, + driveID string, + service graph.Servicer, + statusUpdater support.StatusUpdater, + source driveSource, + ctrlOpts control.Options, + colScope collectionScope, + doNotMergeItems bool, +) *Collection { c := &Collection{ - itemClient: itemClient, - folderPath: folderPath, + itemClient: gr, + itemGetter: api.GetDriveItem, + folderPath: currPath, prevPath: prevPath, - locPath: locPath, - prevLocPath: prevLocPath, driveItems: map[string]models.DriveItemable{}, driveID: driveID, source: source, service: service, - data: make(chan data.Stream, collectionChannelBufferSize), + data: make(chan data.Stream, graph.Parallelism(path.OneDriveMetadataService).CollectionBufferSize()), statusUpdater: statusUpdater, ctrl: ctrlOpts, - state: data.StateOf(prevPath, folderPath), + state: data.StateOf(prevPath, currPath), scope: colScope, doNotMergeItems: doNotMergeItems, } @@ -194,16 +205,14 @@ func NewCollection( // Allows tests to set a mock populator switch source { case SharePointSource: - c.itemGetter = api.GetDriveItem c.itemReader = sharePointItemReader c.itemMetaReader = sharePointItemMetaReader default: - c.itemGetter = api.GetDriveItem c.itemReader = oneDriveItemReader c.itemMetaReader = oneDriveItemMetaReader } - return c, nil + return c } // Adds an itemID to the collection. This will make it eligible to be @@ -265,17 +274,21 @@ func (oc Collection) PreviousLocationPath() details.LocationIDer { return nil } + var ider details.LocationIDer + switch oc.source { case OneDriveSource: - return details.NewOneDriveLocationIDer( + ider = details.NewOneDriveLocationIDer( oc.driveID, oc.prevLocPath.Elements()...) default: - return details.NewSharePointLocationIDer( + ider = details.NewSharePointLocationIDer( oc.driveID, oc.prevLocPath.Elements()...) } + + return ider } func (oc Collection) State() data.CollectionState { @@ -286,27 +299,6 @@ func (oc Collection) DoNotMergeItems() bool { return oc.doNotMergeItems } -// FilePermission is used to store permissions of a specific user to a -// OneDrive item. -type UserPermission struct { - ID string `json:"id,omitempty"` - Roles []string `json:"role,omitempty"` - Email string `json:"email,omitempty"` // DEPRECATED: Replaced with UserID in newer backups - EntityID string `json:"entityId,omitempty"` - Expiration *time.Time `json:"expiration,omitempty"` -} - -// ItemMeta contains metadata about the Item. It gets stored in a -// separate file in kopia -type Metadata struct { - FileName string `json:"filename,omitempty"` - // SharingMode denotes what the current mode of sharing is for the object. 
- // - inherited: permissions same as parent permissions (no "shared" in delta) - // - custom: use Permissions to set correct permissions ("shared" has value in delta) - SharingMode SharingMode `json:"permissionMode,omitempty"` - Permissions []UserPermission `json:"permissions,omitempty"` -} - // Item represents a single item retrieved from OneDrive type Item struct { id string @@ -314,57 +306,19 @@ type Item struct { info details.ItemInfo } -func (od *Item) UUID() string { - return od.id -} - -func (od *Item) ToReader() io.ReadCloser { - return od.data -} - // Deleted implements an interface function. However, OneDrive items are marked // as deleted by adding them to the exclude list so this can always return // false. -func (od Item) Deleted() bool { - return false -} - -func (od *Item) Info() details.ItemInfo { - return od.info -} - -func (od *Item) ModTime() time.Time { - return od.info.Modified() -} - -type MetadataItem struct { - id string - data io.ReadCloser - modTime time.Time -} - -func (od *MetadataItem) UUID() string { - return od.id -} - -func (od *MetadataItem) ToReader() io.ReadCloser { - return od.data -} - -// Deleted implements an interface function. However, OneDrive items are marked -// as deleted by adding them to the exclude list so this can always return -// false. -func (od MetadataItem) Deleted() bool { - return false -} - -func (od *MetadataItem) ModTime() time.Time { - return od.modTime -} +func (i Item) Deleted() bool { return false } +func (i *Item) UUID() string { return i.id } +func (i *Item) ToReader() io.ReadCloser { return i.data } +func (i *Item) Info() details.ItemInfo { return i.info } +func (i *Item) ModTime() time.Time { return i.info.Modified() } // getDriveItemContent fetch drive item's contents with retries func (oc *Collection) getDriveItemContent( ctx context.Context, + driveID string, item models.DriveItemable, errs *fault.Bus, ) (io.ReadCloser, error) { @@ -372,45 +326,29 @@ func (oc *Collection) getDriveItemContent( itemID = ptr.Val(item.GetId()) itemName = ptr.Val(item.GetName()) el = errs.Local() - - itemData io.ReadCloser - err error ) - // Initial try with url from delta + 2 retries - for i := 1; i <= maxDownloadRetires; i++ { - _, itemData, err = oc.itemReader(ctx, oc.itemClient, item) - if err == nil || !graph.IsErrUnauthorized(err) { - break - } - - // Assume unauthorized requests are a sign of an expired jwt - // token, and that we've overrun the available window to - // download the actual file. Re-downloading the item will - // refresh that download url. 
- di, diErr := oc.itemGetter(ctx, oc.service, oc.driveID, itemID) - if diErr != nil { - err = clues.Wrap(diErr, "retrieving expired item") - break - } - - item = di - } - - // check for errors following retries + itemData, err := downloadContent( + ctx, + oc.service, + oc.itemGetter, + oc.itemReader, + oc.itemClient, + item, + oc.driveID) if err != nil { if clues.HasLabel(err, graph.LabelsMalware) || (item != nil && item.GetMalware() != nil) { logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipMalware).Info("item flagged as malware") - el.AddSkip(fault.FileSkip(fault.SkipMalware, itemID, itemName, graph.ItemInfo(item))) + el.AddSkip(fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, graph.ItemInfo(item))) - return nil, clues.Wrap(err, "downloading item").Label(graph.LabelsSkippable) + return nil, clues.Wrap(err, "malware item").Label(graph.LabelsSkippable) } if clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) || graph.IsErrDeletedInFlight(err) { logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipNotFound).Info("item not found") - el.AddSkip(fault.FileSkip(fault.SkipNotFound, itemID, itemName, graph.ItemInfo(item))) + el.AddSkip(fault.FileSkip(fault.SkipNotFound, driveID, itemID, itemName, graph.ItemInfo(item))) - return nil, clues.Wrap(err, "downloading item").Label(graph.LabelsSkippable) + return nil, clues.Wrap(err, "deleted item").Label(graph.LabelsSkippable) } // Skip big OneNote files as they can't be downloaded @@ -423,9 +361,9 @@ func (oc *Collection) getDriveItemContent( // restore, or we have to handle it separately by somehow // deleting the entire collection. logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipBigOneNote).Info("max OneNote file size exceeded") - el.AddSkip(fault.FileSkip(fault.SkipBigOneNote, itemID, itemName, graph.ItemInfo(item))) + el.AddSkip(fault.FileSkip(fault.SkipBigOneNote, driveID, itemID, itemName, graph.ItemInfo(item))) - return nil, clues.Wrap(err, "downloading item").Label(graph.LabelsSkippable) + return nil, clues.Wrap(err, "max oneNote item").Label(graph.LabelsSkippable) } logger.CtxErr(ctx, err).Error("downloading item") @@ -433,12 +371,48 @@ func (oc *Collection) getDriveItemContent( // return err, not el.Err(), because the lazy reader needs to communicate to // the data consumer that this item is unreadable, regardless of the fault state. - return nil, clues.Wrap(err, "downloading item") + return nil, clues.Wrap(err, "fetching item content") } return itemData, nil } +// downloadContent attempts to fetch the item content. If the content url +// is expired (ie, returns a 401), it re-fetches the item to get a new download +// url and tries again. +func downloadContent( + ctx context.Context, + svc graph.Servicer, + igf itemGetterFunc, + irf itemReaderFunc, + gr graph.Requester, + item models.DriveItemable, + driveID string, +) (io.ReadCloser, error) { + _, content, err := irf(ctx, gr, item) + if err == nil { + return content, nil + } else if !graph.IsErrUnauthorized(err) { + return nil, err + } + + // Assume unauthorized requests are a sign of an expired jwt + // token, and that we've overrun the available window to + // download the actual file. Re-downloading the item will + // refresh that download url. 
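+	// Only a single retry is attempted: if reading the re-fetched item's
+	// content fails as well, that error is returned to the caller.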
+ di, err := igf(ctx, svc, driveID, ptr.Val(item.GetId())) + if err != nil { + return nil, clues.Wrap(err, "retrieving expired item") + } + + _, content, err = irf(ctx, gr, di) + if err != nil { + return nil, clues.Wrap(err, "content download retry") + } + + return content, nil +} + // populateItems iterates through items added to the collection // and uses the collection `itemReader` to read the item func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) { @@ -473,7 +447,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) { defer colCloser() defer close(folderProgress) - semaphoreCh := make(chan struct{}, urlPrefetchChannelBufferSize) + semaphoreCh := make(chan struct{}, graph.Parallelism(path.OneDriveService).Item()) defer close(semaphoreCh) for _, item := range oc.driveItems { @@ -504,9 +478,9 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) { ctx = clues.Add( ctx, - "backup_item_id", itemID, - "backup_item_name", itemName, - "backup_item_size", itemSize) + "item_id", itemID, + "item_name", clues.Hide(itemName), + "item_size", itemSize) item.SetParentReference(setName(item.GetParentReference(), oc.driveName)) @@ -545,7 +519,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) { itemInfo.OneDrive.ParentPath = parentPathString } - ctx = clues.Add(ctx, "backup_item_info", itemInfo) + ctx = clues.Add(ctx, "item_info", itemInfo) if isFile { dataSuffix := metadata.DataFileSuffix @@ -555,7 +529,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) { // attempts to read bytes. Assumption is that kopia will check things // like file modtimes before attempting to read. itemReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { - itemData, err := oc.getDriveItemContent(ctx, item, errs) + itemData, err := oc.getDriveItemContent(ctx, oc.driveID, item, errs) if err != nil { return nil, err } @@ -590,12 +564,12 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) { return progReader, nil }) - oc.data <- &MetadataItem{ - id: metaFileName + metaSuffix, - data: metaReader, + oc.data <- &metadata.Item{ + ID: metaFileName + metaSuffix, + Data: metaReader, // Metadata file should always use the latest time as // permissions change does not update mod time. 
- modTime: time.Now(), + Mod: time.Now(), } // Item read successfully, add to collection diff --git a/src/internal/connector/onedrive/collection_test.go b/src/internal/connector/onedrive/collection_test.go index b4328fe9b..9402c8df2 100644 --- a/src/internal/connector/onedrive/collection_test.go +++ b/src/internal/connector/onedrive/collection_test.go @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/connector/support" @@ -67,14 +68,16 @@ func (suite *CollectionUnitTestSuite) TestCollection() { testItemName = "itemName" testItemData = []byte("testdata") now = time.Now() - testItemMeta = Metadata{Permissions: []UserPermission{ - { - ID: "testMetaID", - Roles: []string{"read", "write"}, - Email: "email@provider.com", - Expiration: &now, + testItemMeta = metadata.Metadata{ + Permissions: []metadata.Permission{ + { + ID: "testMetaID", + Roles: []string{"read", "write"}, + Email: "email@provider.com", + Expiration: &now, + }, }, - }} + } ) type nst struct { @@ -98,7 +101,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() { numInstances: 1, source: OneDriveSource, itemDeets: nst{testItemName, 42, now}, - itemReader: func(context.Context, *http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + itemReader: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: testItemName, Modified: now}}, io.NopCloser(bytes.NewReader(testItemData)), nil @@ -114,7 +117,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() { numInstances: 3, source: OneDriveSource, itemDeets: nst{testItemName, 42, now}, - itemReader: func(context.Context, *http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + itemReader: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: testItemName, Modified: now}}, io.NopCloser(bytes.NewReader(testItemData)), nil @@ -130,7 +133,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() { numInstances: 3, source: OneDriveSource, itemDeets: nst{testItemName, 42, now}, - itemReader: func(context.Context, *http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + itemReader: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{}, nil, clues.New("test malware").Label(graph.LabelsMalware) }, infoFrom: func(t *testing.T, dii details.ItemInfo) (string, string) { @@ -146,7 +149,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() { source: OneDriveSource, itemDeets: nst{testItemName, 42, now}, // Usually `Not Found` is returned from itemGetter and not itemReader - itemReader: func(context.Context, *http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + itemReader: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{}, nil, clues.New("test not found").Label(graph.LabelStatus(http.StatusNotFound)) }, infoFrom: func(t *testing.T, dii details.ItemInfo) (string, string) { @@ -161,7 +164,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() 
{ numInstances: 1, source: SharePointSource, itemDeets: nst{testItemName, 42, now}, - itemReader: func(context.Context, *http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + itemReader: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{SharePoint: &details.SharePointInfo{ItemName: testItemName, Modified: now}}, io.NopCloser(bytes.NewReader(testItemData)), nil @@ -177,7 +180,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() { numInstances: 3, source: SharePointSource, itemDeets: nst{testItemName, 42, now}, - itemReader: func(context.Context, *http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + itemReader: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{SharePoint: &details.SharePointInfo{ItemName: testItemName, Modified: now}}, io.NopCloser(bytes.NewReader(testItemData)), nil @@ -207,7 +210,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() { require.NoError(t, err, clues.ToCore(err)) coll, err := NewCollection( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), folderPath, nil, "drive-id", @@ -278,7 +281,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() { if err != nil { for _, label := range test.expectLabels { - assert.True(t, clues.HasLabel(err, label), "has clues label:", label) + assert.Truef(t, clues.HasLabel(err, label), "has clues label: %s", label) } return @@ -290,21 +293,19 @@ func (suite *CollectionUnitTestSuite) TestCollection() { assert.Equal(t, testItemName, name) assert.Equal(t, driveFolderPath, parentPath) - if test.source == OneDriveSource { - readItemMeta := readItems[1] + readItemMeta := readItems[1] - assert.Equal(t, testItemID+metadata.MetaFileSuffix, readItemMeta.UUID()) + assert.Equal(t, testItemID+metadata.MetaFileSuffix, readItemMeta.UUID()) - readMetaData, err := io.ReadAll(readItemMeta.ToReader()) - require.NoError(t, err, clues.ToCore(err)) + readMetaData, err := io.ReadAll(readItemMeta.ToReader()) + require.NoError(t, err, clues.ToCore(err)) - tm, err := json.Marshal(testItemMeta) - if err != nil { - t.Fatal("unable to marshall test permissions", err) - } - - assert.Equal(t, tm, readMetaData) + tm, err := json.Marshal(testItemMeta) + if err != nil { + t.Fatal("unable to marshall test permissions", err) } + + assert.Equal(t, tm, readMetaData) }) } } @@ -347,7 +348,7 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() { require.NoError(t, err, clues.ToCore(err)) coll, err := NewCollection( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), folderPath, nil, "fakeDriveID", @@ -370,7 +371,7 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() { coll.itemReader = func( context.Context, - *http.Client, + graph.Requester, models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{}, nil, assert.AnError @@ -437,7 +438,7 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadUnauthorizedErrorRetry() require.NoError(t, err) coll, err := NewCollection( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), folderPath, nil, "fakeDriveID", @@ -470,10 +471,10 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadUnauthorizedErrorRetry() coll.itemReader = func( context.Context, - *http.Client, + graph.Requester, models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) { - if count < 2 { + if count 
< 1 { count++ return details.ItemInfo{}, nil, clues.Stack(assert.AnError). Label(graph.LabelStatus(http.StatusUnauthorized)) @@ -494,13 +495,13 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadUnauthorizedErrorRetry() assert.True(t, ok) _, err = io.ReadAll(collItem.ToReader()) - assert.NoError(t, err) + assert.NoError(t, err, clues.ToCore(err)) wg.Wait() require.Equal(t, 1, collStatus.Metrics.Objects, "only one object should be counted") require.Equal(t, 1, collStatus.Metrics.Successes, "read object successfully") - require.Equal(t, 2, count, "retry count") + require.Equal(t, 1, count, "retry count") }) } } @@ -515,6 +516,10 @@ func (suite *CollectionUnitTestSuite) TestCollectionPermissionBackupLatestModTim name: "oneDrive", source: OneDriveSource, }, + { + name: "sharePoint", + source: SharePointSource, + }, } for _, test := range table { suite.Run(test.name, func() { @@ -537,7 +542,7 @@ func (suite *CollectionUnitTestSuite) TestCollectionPermissionBackupLatestModTim require.NoError(t, err, clues.ToCore(err)) coll, err := NewCollection( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), folderPath, nil, "drive-id", @@ -561,7 +566,7 @@ func (suite *CollectionUnitTestSuite) TestCollectionPermissionBackupLatestModTim coll.itemReader = func( context.Context, - *http.Client, + graph.Requester, models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: "fakeName", Modified: time.Now()}}, @@ -611,7 +616,7 @@ func TestGetDriveItemUnitTestSuite(t *testing.T) { suite.Run(t, &GetDriveItemUnitTestSuite{Suite: tester.NewUnitSuite(t)}) } -func (suite *GetDriveItemUnitTestSuite) TestGetDriveItemError() { +func (suite *GetDriveItemUnitTestSuite) TestGetDriveItem_error() { strval := "not-important" table := []struct { @@ -637,14 +642,14 @@ func (suite *GetDriveItemUnitTestSuite) TestGetDriveItemError() { name: "malware error", colScope: CollectionScopeFolder, itemSize: 10, - err: clues.New("test error").Label(graph.LabelsMalware), + err: clues.New("malware error").Label(graph.LabelsMalware), labels: []string{graph.LabelsMalware, graph.LabelsSkippable}, }, { name: "file not found error", colScope: CollectionScopeFolder, itemSize: 10, - err: clues.New("test error").Label(graph.LabelStatus(http.StatusNotFound)), + err: clues.New("not found error").Label(graph.LabelStatus(http.StatusNotFound)), labels: []string{graph.LabelStatus(http.StatusNotFound), graph.LabelsSkippable}, }, { @@ -652,14 +657,14 @@ func (suite *GetDriveItemUnitTestSuite) TestGetDriveItemError() { name: "small OneNote file", colScope: CollectionScopePackage, itemSize: 10, - err: clues.New("test error").Label(graph.LabelStatus(http.StatusServiceUnavailable)), + err: clues.New("small onenote error").Label(graph.LabelStatus(http.StatusServiceUnavailable)), labels: []string{graph.LabelStatus(http.StatusServiceUnavailable)}, }, { name: "big OneNote file", colScope: CollectionScopePackage, itemSize: MaxOneNoteFileSize, - err: clues.New("test error").Label(graph.LabelStatus(http.StatusServiceUnavailable)), + err: clues.New("big onenote error").Label(graph.LabelStatus(http.StatusServiceUnavailable)), labels: []string{graph.LabelStatus(http.StatusServiceUnavailable), graph.LabelsSkippable}, }, { @@ -667,7 +672,7 @@ func (suite *GetDriveItemUnitTestSuite) TestGetDriveItemError() { name: "big file", colScope: CollectionScopeFolder, itemSize: MaxOneNoteFileSize, - err: clues.New("test error").Label(graph.LabelStatus(http.StatusServiceUnavailable)), 
+ err: clues.New("big file error").Label(graph.LabelStatus(http.StatusServiceUnavailable)), labels: []string{graph.LabelStatus(http.StatusServiceUnavailable)}, }, } @@ -689,9 +694,9 @@ func (suite *GetDriveItemUnitTestSuite) TestGetDriveItemError() { item.SetSize(&test.itemSize) col.itemReader = func( - ctx context.Context, - hc *http.Client, - item models.DriveItemable, + _ context.Context, + _ graph.Requester, + _ models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) { return details.ItemInfo{}, nil, test.err } @@ -705,13 +710,13 @@ func (suite *GetDriveItemUnitTestSuite) TestGetDriveItemError() { return item, nil } - _, err := col.getDriveItemContent(ctx, item, errs) + _, err := col.getDriveItemContent(ctx, "driveID", item, errs) if test.err == nil { - assert.NoError(t, err, "no error") + assert.NoError(t, err, clues.ToCore(err)) return } - assert.EqualError(t, err, clues.Wrap(test.err, "downloading item").Error(), "error") + assert.ErrorIs(t, err, test.err, clues.ToCore(err)) labelsMap := map[string]struct{}{} for _, l := range test.labels { @@ -722,3 +727,103 @@ func (suite *GetDriveItemUnitTestSuite) TestGetDriveItemError() { }) } } + +func (suite *GetDriveItemUnitTestSuite) TestDownloadContent() { + var ( + svc graph.Servicer + gr graph.Requester + driveID string + iorc = io.NopCloser(bytes.NewReader([]byte("fnords"))) + item = models.NewDriveItem() + itemWID = models.NewDriveItem() + ) + + itemWID.SetId(ptr.To("brainhooldy")) + + table := []struct { + name string + igf itemGetterFunc + irf itemReaderFunc + expectErr require.ErrorAssertionFunc + expect require.ValueAssertionFunc + }{ + { + name: "good", + irf: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + return details.ItemInfo{}, iorc, nil + }, + expectErr: require.NoError, + expect: require.NotNil, + }, + { + name: "expired url redownloads", + igf: func(context.Context, graph.Servicer, string, string) (models.DriveItemable, error) { + return itemWID, nil + }, + irf: func(c context.Context, g graph.Requester, m models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + // a bit hacky: assume only igf returns an item with a non-zero id. + if len(ptr.Val(m.GetId())) == 0 { + return details.ItemInfo{}, + nil, + clues.Stack(assert.AnError).Label(graph.LabelStatus(http.StatusUnauthorized)) + } + + return details.ItemInfo{}, iorc, nil + }, + expectErr: require.NoError, + expect: require.NotNil, + }, + { + name: "immediate error", + irf: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + return details.ItemInfo{}, nil, assert.AnError + }, + expectErr: require.Error, + expect: require.Nil, + }, + { + name: "re-fetching the item fails", + igf: func(context.Context, graph.Servicer, string, string) (models.DriveItemable, error) { + return nil, assert.AnError + }, + irf: func(context.Context, graph.Requester, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + return details.ItemInfo{}, + nil, + clues.Stack(assert.AnError).Label(graph.LabelStatus(http.StatusUnauthorized)) + }, + expectErr: require.Error, + expect: require.Nil, + }, + { + name: "expired url fails redownload", + igf: func(context.Context, graph.Servicer, string, string) (models.DriveItemable, error) { + return itemWID, nil + }, + irf: func(c context.Context, g graph.Requester, m models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) { + // a bit hacky: assume only igf returns an item with a non-zero id. 
+ if len(ptr.Val(m.GetId())) == 0 { + return details.ItemInfo{}, + nil, + clues.Stack(assert.AnError).Label(graph.LabelStatus(http.StatusUnauthorized)) + } + + return details.ItemInfo{}, iorc, assert.AnError + }, + expectErr: require.Error, + expect: require.Nil, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + r, err := downloadContent(ctx, svc, test.igf, test.irf, gr, item, driveID) + + test.expect(t, r) + test.expectErr(t, err, clues.ToCore(err)) + }) + } +} diff --git a/src/internal/connector/onedrive/collections.go b/src/internal/connector/onedrive/collections.go index fdac083c8..cc27f4fb2 100644 --- a/src/internal/connector/onedrive/collections.go +++ b/src/internal/connector/onedrive/collections.go @@ -5,17 +5,15 @@ import ( "encoding/json" "fmt" "io" - "net/http" "strings" "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/pkg/errors" "golang.org/x/exp/maps" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -24,6 +22,7 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type driveSource int @@ -73,7 +72,7 @@ type folderMatcher interface { // resource owner, which can be either a user or a sharepoint site. type Collections struct { // configured to handle large item downloads - itemClient *http.Client + itemClient graph.Requester tenant string resourceOwner string @@ -101,6 +100,7 @@ type Collections struct { servicer graph.Servicer, driveID, link string, ) itemPager + servicePathPfxFunc pathPrefixerFunc // Track stats from drive enumeration. Represents the items backed up. NumItems int @@ -109,7 +109,7 @@ type Collections struct { } func NewCollections( - itemClient *http.Client, + itemClient graph.Requester, tenant string, resourceOwner string, source driveSource, @@ -119,17 +119,18 @@ func NewCollections( ctrlOpts control.Options, ) *Collections { return &Collections{ - itemClient: itemClient, - tenant: tenant, - resourceOwner: resourceOwner, - source: source, - matcher: matcher, - CollectionMap: map[string]map[string]*Collection{}, - drivePagerFunc: PagerForSource, - itemPagerFunc: defaultItemPager, - service: service, - statusUpdater: statusUpdater, - ctrl: ctrlOpts, + itemClient: itemClient, + tenant: tenant, + resourceOwner: resourceOwner, + source: source, + matcher: matcher, + CollectionMap: map[string]map[string]*Collection{}, + drivePagerFunc: PagerForSource, + itemPagerFunc: defaultItemPager, + servicePathPfxFunc: pathPrefixerForSource(tenant, resourceOwner, source), + service: service, + statusUpdater: statusUpdater, + ctrl: ctrlOpts, } } @@ -191,42 +192,41 @@ func deserializeMetadata( continue } - // This is conservative, but report an error if any of the items for - // any of the deserialized maps have duplicate drive IDs. This will - // cause the entire backup to fail, but it's not clear if higher - // layers would have caught this. 
Worst case if we don't handle this - // we end up in a situation where we're sourcing items from the wrong - // base in kopia wrapper. - if errors.Is(err, errExistingMapping) { - return nil, nil, clues.Wrap(err, "deserializing metadata file").WithClues(ictx) + // This is conservative, but report an error if either any of the items + // for any of the deserialized maps have duplicate drive IDs or there's + // some other problem deserializing things. This will cause the entire + // backup to fail, but it's not clear if higher layers would have caught + // these cases. We can make the logic for deciding when to continue vs. + // when to fail less strict in the future if needed. + if err != nil { + return nil, nil, clues.Stack(err).WithClues(ictx) } - - err = clues.Stack(err).WithClues(ictx) - - el.AddRecoverable(err) - logger.CtxErr(ictx, err).Error("deserializing base backup metadata") } } - // Go through and remove partial results (i.e. path mapping but no delta URL - // or vice-versa). - for k, v := range prevDeltas { - // Remove entries with an empty delta token as it's not useful. - if len(v) == 0 { - delete(prevDeltas, k) - delete(prevFolders, k) + // Go through and remove delta tokens if we didn't have any paths for them + // or one or more paths are empty (incorrect somehow). This will ensure we + // don't accidentally try to pull in delta results when we should have + // enumerated everything instead. + // + // Loop over the set of previous deltas because it's alright to have paths + // without a delta but not to have a delta without paths. This way ensures + // we check at least all the path sets for the deltas we have. + for drive := range prevDeltas { + paths := prevFolders[drive] + if len(paths) == 0 { + delete(prevDeltas, drive) } - // Remove entries without a folders map as we can't tell kopia the - // hierarchy changes. - if _, ok := prevFolders[k]; !ok { - delete(prevDeltas, k) - } - } - - for k := range prevFolders { - if _, ok := prevDeltas[k]; !ok { - delete(prevFolders, k) + // Drives have only a single delta token. If we find any folder that + // seems like the path is bad we need to drop the entire token and start + // fresh. Since we know the token will be gone we can also stop checking + // for other possibly incorrect folder paths. 
+ for _, prevPath := range paths { + if len(prevPath) == 0 { + delete(prevDeltas, drive) + break + } } } } @@ -272,11 +272,18 @@ func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro func (c *Collections) Get( ctx context.Context, prevMetadata []data.RestoreCollection, + ssmb *prefixmatcher.StringSetMatchBuilder, errs *fault.Bus, -) ([]data.BackupCollection, map[string]map[string]struct{}, error) { +) ([]data.BackupCollection, error) { prevDeltas, oldPathsByDriveID, err := deserializeMetadata(ctx, prevMetadata, errs) if err != nil { - return nil, nil, err + return nil, err + } + + driveTombstones := map[string]struct{}{} + + for driveID := range oldPathsByDriveID { + driveTombstones[driveID] = struct{}{} } driveComplete, closer := observe.MessageWithCompletion(ctx, observe.Bulletf("files")) @@ -286,12 +293,12 @@ func (c *Collections) Get( // Enumerate drives for the specified resourceOwner pager, err := c.drivePagerFunc(c.source, c.service, c.resourceOwner, nil) if err != nil { - return nil, nil, graph.Stack(ctx, err) + return nil, graph.Stack(ctx, err) } drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries) if err != nil { - return nil, nil, err + return nil, err } var ( @@ -299,9 +306,6 @@ func (c *Collections) Get( deltaURLs = map[string]string{} // Drive ID -> folder ID -> folder path folderPaths = map[string]map[string]string{} - // Items that should be excluded when sourcing data from the base backup. - // Parent Path -> item ID -> {} - excludedItems = map[string]map[string]struct{}{} ) for _, d := range drives { @@ -314,6 +318,8 @@ func (c *Collections) Get( ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName) ) + delete(driveTombstones, driveID) + if _, ok := c.CollectionMap[driveID]; !ok { c.CollectionMap[driveID] = map[string]*Collection{} } @@ -337,7 +343,7 @@ func (c *Collections) Get( prevDelta, errs) if err != nil { - return nil, nil, err + return nil, err } // Used for logging below. @@ -377,19 +383,10 @@ func (c *Collections) Get( c.resourceOwner, c.source) if err != nil { - return nil, nil, - clues.Wrap(err, "making exclude prefix").WithClues(ictx) + return nil, clues.Wrap(err, "making exclude prefix").WithClues(ictx) } - pstr := p.String() - - eidi, ok := excludedItems[pstr] - if !ok { - eidi = map[string]struct{}{} - } - - maps.Copy(eidi, excluded) - excludedItems[pstr] = eidi + ssmb.Add(p.String(), excluded) continue } @@ -414,12 +411,12 @@ func (c *Collections) Get( prevPath, err := path.FromDataLayerPath(p, false) if err != nil { err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p) - return nil, map[string]map[string]struct{}{}, err + return nil, err } col, err := NewCollection( c.itemClient, - nil, + nil, // delete the folder prevPath, driveID, c.service, @@ -429,7 +426,7 @@ func (c *Collections) Get( CollectionScopeUnknown, true) if err != nil { - return nil, map[string]map[string]struct{}{}, clues.Wrap(err, "making collection").WithClues(ictx) + return nil, clues.Wrap(err, "making collection").WithClues(ictx) } c.CollectionMap[driveID][fldID] = col @@ -438,15 +435,41 @@ func (c *Collections) Get( observe.Message(ctx, fmt.Sprintf("Discovered %d items to backup", c.NumItems)) - // Add an extra for the metadata collection. collections := []data.BackupCollection{} + // add all the drives we found for _, driveColls := range c.CollectionMap { for _, coll := range driveColls { collections = append(collections, coll) } } + // generate tombstones for drives that were removed. 
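+	// Each tombstone is a collection with a nil current path and the drive's
+	// previous path prefix, flagged doNotMergeItems, marking the previously
+	// backed up drive as deleted.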
+ for driveID := range driveTombstones { + prevDrivePath, err := c.servicePathPfxFunc(driveID) + if err != nil { + return nil, clues.Wrap(err, "making drive tombstone previous path").WithClues(ctx) + } + + coll, err := NewCollection( + c.itemClient, + nil, // delete the drive + prevDrivePath, + driveID, + c.service, + c.statusUpdater, + c.source, + c.ctrl, + CollectionScopeUnknown, + true) + if err != nil { + return nil, clues.Wrap(err, "making drive tombstone").WithClues(ctx) + } + + collections = append(collections, coll) + } + + // add metadata collections service, category := c.source.toPathServiceCat() md, err := graph.MakeMetadataCollection( c.tenant, @@ -468,8 +491,7 @@ func (c *Collections) Get( collections = append(collections, md) } - // TODO(ashmrtn): Track and return the set of items to exclude. - return collections, excludedItems, nil + return collections, nil } func updateCollectionPaths( @@ -622,7 +644,7 @@ func (c *Collections) getCollectionPath( if item.GetParentReference() == nil || item.GetParentReference().GetPath() == nil { err := clues.New("no parent reference"). - With("item_name", ptr.Val(item.GetName())) + With("item_name", clues.Hide(ptr.Val(item.GetName()))) return nil, err } @@ -653,7 +675,7 @@ func (c *Collections) getCollectionPath( return nil, clues.New("folder with empty name") } - collectionPath, err = collectionPath.Append(name, false) + collectionPath, err = collectionPath.Append(false, name) if err != nil { return nil, clues.Wrap(err, "making non-root folder path") } @@ -687,16 +709,16 @@ func (c *Collections) UpdateCollections( var ( itemID = ptr.Val(item.GetId()) itemName = ptr.Val(item.GetName()) - ictx = clues.Add(ctx, "item_id", itemID, "item_name", itemName) + ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName)) isFolder = item.GetFolder() != nil || item.GetPackage() != nil ) if item.GetMalware() != nil { addtl := graph.ItemInfo(item) - skip := fault.FileSkip(fault.SkipMalware, itemID, itemName, addtl) + skip := fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, addtl) if isFolder { - skip = fault.ContainerSkip(fault.SkipMalware, itemID, itemName, addtl) + skip = fault.ContainerSkip(fault.SkipMalware, driveID, itemID, itemName, addtl) } errs.AddSkip(skip) @@ -734,7 +756,7 @@ func (c *Collections) UpdateCollections( // Skip items that don't match the folder selectors we were given. 
if shouldSkipDrive(ctx, collectionPath, c.matcher, driveName) { - logger.Ctx(ictx).Debugw("Skipping drive path", "skipped_path", collectionPath.String()) + logger.Ctx(ictx).Debugw("path not selected", "skipped_path", collectionPath.String()) continue } diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index 5598d701e..49c0ad376 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -15,10 +15,10 @@ import ( "github.com/stretchr/testify/suite" "golang.org/x/exp/maps" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" + pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock" "github.com/alcionai/corso/src/internal/connector/graph" gapi "github.com/alcionai/corso/src/internal/connector/graph/api" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" - "github.com/alcionai/corso/src/internal/connector/onedrive/api/mock" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -27,6 +27,8 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" + "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) type statePath struct { @@ -780,7 +782,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { maps.Copy(outputFolderMap, tt.inputFolderMap) c := NewCollections( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), tenant, user, OneDriveSource, @@ -910,8 +912,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { }, }, expectedDeltas: map[string]string{}, - expectedPaths: map[string]map[string]string{}, - errCheck: assert.NoError, + expectedPaths: map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + errCheck: assert.NoError, }, { // An empty path map but valid delta results in metadata being returned @@ -934,7 +940,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { } }, }, - expectedDeltas: map[string]string{driveID1: deltaURL1}, + expectedDeltas: map[string]string{}, expectedPaths: map[string]map[string]string{driveID1: {}}, errCheck: assert.NoError, }, @@ -963,9 +969,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { } }, }, - expectedDeltas: map[string]string{}, - expectedPaths: map[string]map[string]string{}, - errCheck: assert.NoError, + expectedDeltas: map[string]string{driveID1: ""}, + expectedPaths: map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + errCheck: assert.NoError, }, { name: "SuccessTwoDrivesTwoCollections", @@ -1031,9 +1041,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { } }, }, - expectedDeltas: map[string]string{}, - expectedPaths: map[string]map[string]string{}, - errCheck: assert.Error, + errCheck: assert.Error, }, { // Unexpected files are logged and skipped. 
They don't cause an error to @@ -1165,8 +1173,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { deltas, paths, err := deserializeMetadata(ctx, cols, fault.New(true)) test.errCheck(t, err) - assert.Equal(t, test.expectedDeltas, deltas) - assert.Equal(t, test.expectedPaths, paths) + assert.Equal(t, test.expectedDeltas, deltas, "deltas") + assert.Equal(t, test.expectedPaths, paths, "paths") }) } } @@ -1244,16 +1252,15 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { user, path.OneDriveService, path.FilesCategory, - false, - ) + false) require.NoError(suite.T(), err, "making metadata path", clues.ToCore(err)) - driveID1 := uuid.NewString() + driveID1 := "drive-1-" + uuid.NewString() drive1 := models.NewDrive() drive1.SetId(&driveID1) drive1.SetName(&driveID1) - driveID2 := uuid.NewString() + driveID2 := "drive-2-" + uuid.NewString() drive2 := models.NewDrive() drive2.SetId(&driveID2) drive2.SetName(&driveID2) @@ -1280,12 +1287,15 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { prevFolderPaths map[string]map[string]string // Collection name -> set of item IDs. We can't check item data because // that's not mocked out. Metadata is checked separately. - expectedCollections map[string]map[data.CollectionState][]string - expectedDeltaURLs map[string]string - expectedFolderPaths map[string]map[string]string - expectedDelList map[string]map[string]struct{} + expectedCollections map[string]map[data.CollectionState][]string + expectedDeltaURLs map[string]string + expectedFolderPaths map[string]map[string]string + // Items that should be excluded from the base. Only populated if the delta + // was valid and there was at least 1 previous folder path. + expectedDelList *pmMock.PrefixMap expectedSkippedCount int - doNotMergeItems bool + // map full or previous path (prefers full) -> bool + doNotMergeItems map[string]bool }{ { name: "OneDrive_OneItemPage_DelFileOnly_NoFolders_NoErrors", @@ -1314,12 +1324,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedFolderPaths: map[string]map[string]string{ driveID1: {"root": rootFolderPath1}, }, - expectedDelList: map[string]map[string]struct{}{ + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{ rootFolderPath1: getDelList("file"), - }, + }), }, { - name: "OneDrive_OneItemPage_NoFolders_NoErrors", + name: "OneDrive_OneItemPage_NoFolderDeltas_NoErrors", drives: []models.Driveable{drive1}, items: map[string][]deltaPagerResult{ driveID1: { @@ -1345,9 +1355,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedFolderPaths: map[string]map[string]string{ driveID1: {"root": rootFolderPath1}, }, - expectedDelList: map[string]map[string]struct{}{ + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{ rootFolderPath1: getDelList("file"), - }, + }), }, { name: "OneDrive_OneItemPage_NoErrors", @@ -1364,10 +1374,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, }, - errCheck: assert.NoError, - prevFolderPaths: map[string]map[string]string{ - driveID1: {}, - }, + errCheck: assert.NoError, + prevFolderPaths: map[string]map[string]string{}, expectedCollections: map[string]map[data.CollectionState][]string{ rootFolderPath1: {data.NewState: {}}, folderPath1: {data.NewState: {"folder", "file"}}, @@ -1381,8 +1389,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), + expectedDelList: 
pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, }, }, { @@ -1401,10 +1411,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, }, - errCheck: assert.NoError, - prevFolderPaths: map[string]map[string]string{ - driveID1: {}, - }, + errCheck: assert.NoError, + prevFolderPaths: map[string]map[string]string{}, expectedCollections: map[string]map[data.CollectionState][]string{ rootFolderPath1: {data.NewState: {}}, folderPath1: {data.NewState: {"folder", "file"}}, @@ -1418,8 +1426,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, }, }, { @@ -1440,7 +1450,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, errCheck: assert.NoError, prevFolderPaths: map[string]map[string]string{ - driveID1: {}, + driveID1: { + "root": rootFolderPath1, + }, }, expectedCollections: map[string]map[data.CollectionState][]string{ rootFolderPath1: {data.NotMovedState: {"file"}}, @@ -1455,9 +1467,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{ rootFolderPath1: getDelList("file"), - }, + }), }, { name: "OneDrive_OneItemPage_EmptyDelta_NoErrors", @@ -1482,10 +1494,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { rootFolderPath1: {data.NewState: {}}, folderPath1: {data.NewState: {"folder", "file"}}, }, - expectedDeltaURLs: map[string]string{}, - expectedFolderPaths: map[string]map[string]string{}, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), + expectedDeltaURLs: map[string]string{}, + expectedFolderPaths: map[string]map[string]string{ + driveID1: { + "root": rootFolderPath1, + "folder": folderPath1, + }, + }, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, }, }, { @@ -1528,8 +1547,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file", "file2"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, }, }, { @@ -1585,9 +1606,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder2": folderPath2, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), - rootFolderPath2: getDelList("file2"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, + rootFolderPath2: true, + folderPath2: true, }, }, { @@ -1643,9 +1667,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath2, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), - rootFolderPath2: getDelList("file2"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, + rootFolderPath2: true, + folderPath2: true, }, }, { @@ 
-1696,8 +1723,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "root": rootFolderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{}, - doNotMergeItems: true, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + }, }, { name: "OneDrive_TwoItemPage_DeltaError", @@ -1738,8 +1767,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{}, - doNotMergeItems: true, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, + }, }, { name: "OneDrive_TwoItemPage_NoDeltaError", @@ -1765,7 +1797,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, errCheck: assert.NoError, prevFolderPaths: map[string]map[string]string{ - driveID1: {}, + driveID1: { + "root": rootFolderPath1, + }, }, expectedCollections: map[string]map[data.CollectionState][]string{ rootFolderPath1: {data.NotMovedState: {"file"}}, @@ -1780,10 +1814,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{ rootFolderPath1: getDelList("file", "file2"), - }, - doNotMergeItems: false, + }), + doNotMergeItems: map[string]bool{}, }, { name: "OneDrive_OneItemPage_InvalidPrevDelta_DeleteNonExistentFolder", @@ -1824,8 +1858,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder2": expectedPath1("/folder2"), }, }, - expectedDelList: map[string]map[string]struct{}{}, - doNotMergeItems: true, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, + expectedPath1("/folder2"): true, + }, }, { name: "OneDrive_OneItemPage_InvalidPrevDelta_AnotherFolderAtDeletedLocation", @@ -1870,8 +1908,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder2": expectedPath1("/folder"), }, }, - expectedDelList: map[string]map[string]struct{}{}, - doNotMergeItems: true, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, + }, }, { name: "OneDrive Two Item Pages with Malware", @@ -1915,8 +1956,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file", "file2"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, }, expectedSkippedCount: 2, }, @@ -1970,8 +2013,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{}, - doNotMergeItems: true, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, + expectedPath1("/folder2"): true, + }, }, { name: "One Drive Delta Error Random Folder Delete", @@ -2009,8 +2056,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "root": rootFolderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{}, - doNotMergeItems: true, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + 
rootFolderPath1: true, + folderPath1: true, + }, }, { name: "One Drive Delta Error Random Item Delete", @@ -2046,8 +2096,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "root": rootFolderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{}, - doNotMergeItems: true, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + }, }, { name: "One Drive Folder Made And Deleted", @@ -2087,8 +2139,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "root": rootFolderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, }, }, { @@ -2130,8 +2183,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "folder": folderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, }, }, { @@ -2163,7 +2218,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "root": rootFolderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{}, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + }, }, { name: "One Drive Random Item Delete", @@ -2194,8 +2252,40 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "root": rootFolderPath1, }, }, - expectedDelList: map[string]map[string]struct{}{ - rootFolderPath1: getDelList("file"), + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + }, + }, + { + name: "TwoPriorDrives_OneTombstoned", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + driveRootItem("root"), // will be present + }, + deltaLink: &delta, + }, + }, + }, + errCheck: assert.NoError, + prevFolderPaths: map[string]map[string]string{ + driveID1: {"root": rootFolderPath1}, + driveID2: {"root": rootFolderPath2}, + }, + expectedCollections: map[string]map[data.CollectionState][]string{ + rootFolderPath1: {data.NotMovedState: {}}, + rootFolderPath2: {data.DeletedState: {}}, + }, + expectedDeltaURLs: map[string]string{driveID1: delta}, + expectedFolderPaths: map[string]map[string]string{ + driveID1: {"root": rootFolderPath1}, + }, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath2: true, }, }, } @@ -2231,7 +2321,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { } c := NewCollections( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), tenant, user, OneDriveSource, @@ -2255,12 +2345,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { map[string]string{ driveID1: prevDelta, driveID2: prevDelta, - }, - ), + }), graph.NewMetadataEntry( graph.PreviousPathFileName, - test.prevFolderPaths, - ), + test.prevFolderPaths), }, func(*support.ConnectorOperationStatus) {}, ) @@ -2269,7 +2357,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { prevMetadata := []data.RestoreCollection{data.NotFoundRestoreCollection{Collection: mc}} errs := fault.New(true) - cols, delList, err := c.Get(ctx, prevMetadata, errs) + delList := 
prefixmatcher.NewStringSetBuilder() + + cols, err := c.Get(ctx, prevMetadata, delList, errs) test.errCheck(t, err) assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped())) @@ -2298,7 +2388,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { } assert.Equal(t, test.expectedDeltaURLs, deltas, "delta urls") - assert.Equal(t, test.expectedFolderPaths, paths, "folder paths") + assert.Equal(t, test.expectedFolderPaths, paths, "folder paths") continue } @@ -2325,21 +2415,27 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { "state: %d, path: %s", baseCol.State(), folderPath) - assert.Equal(t, test.doNotMergeItems, baseCol.DoNotMergeItems(), "DoNotMergeItems") + + p := baseCol.FullPath() + if p == nil { + p = baseCol.PreviousPath() + } + + assert.Equalf( + t, + test.doNotMergeItems[p.String()], + baseCol.DoNotMergeItems(), + "DoNotMergeItems in collection: %s", p) } expectedCollectionCount := 0 - for c := range test.expectedCollections { - for range test.expectedCollections[c] { - expectedCollectionCount++ - } + for _, ec := range test.expectedCollections { + expectedCollectionCount += len(ec) } - // This check is necessary to make sure we are all the - // collections we expect it to assert.Equal(t, expectedCollectionCount, collectionCount, "number of collections") - assert.Equal(t, test.expectedDelList, delList, "del list") + test.expectedDelList.AssertEqual(t, delList) }) } } @@ -2366,7 +2462,7 @@ func coreItem( case isFolder: item.SetFolder(models.NewFolder()) case isPackage: - item.SetPackage(models.NewPackage_escaped()) + item.SetPackage(models.NewPackageEscaped()) } return item @@ -2433,7 +2529,7 @@ func delItem( case isFolder: item.SetFolder(models.NewFolder()) case isPackage: - item.SetPackage(models.NewPackage_escaped()) + item.SetPackage(models.NewPackageEscaped()) } return item diff --git a/src/internal/connector/onedrive/consts/consts.go b/src/internal/connector/onedrive/consts/consts.go new file mode 100644 index 000000000..e174062e0 --- /dev/null +++ b/src/internal/connector/onedrive/consts/consts.go @@ -0,0 +1,12 @@ +package onedrive + +const ( + // const used as the root dir for the drive portion of a path prefix. + // eg: tid/onedrive/ro/files/drives/driveid/... + DrivesPathDir = "drives" + // const used as the root-of-drive dir for the drive portion of a path prefix. + // eg: tid/onedrive/ro/files/drives/driveid/root:/... 
+ RootPathDir = "root:" + // root id for drive items + RootID = "root" +) diff --git a/src/internal/connector/onedrive/data_collections.go b/src/internal/connector/onedrive/data_collections.go index a0c3e648f..e89753dae 100644 --- a/src/internal/connector/onedrive/data_collections.go +++ b/src/internal/connector/onedrive/data_collections.go @@ -2,15 +2,15 @@ package onedrive import ( "context" - "net/http" "github.com/alcionai/clues" - "golang.org/x/exp/maps" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" @@ -35,15 +35,16 @@ func (fm odFolderMatcher) Matches(dir string) bool { func DataCollections( ctx context.Context, selector selectors.Selector, - user common.IDNamer, + user idname.Provider, metadata []data.RestoreCollection, + lastBackupVersion int, tenant string, - itemClient *http.Client, + itemClient graph.Requester, service graph.Servicer, su support.StatusUpdater, ctrlOpts control.Options, errs *fault.Bus, -) ([]data.BackupCollection, map[string]map[string]struct{}, error) { +) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) { odb, err := selector.ToOneDriveBackup() if err != nil { return nil, nil, clues.Wrap(err, "parsing selector").WithClues(ctx) @@ -53,7 +54,7 @@ func DataCollections( el = errs.Local() categories = map[path.CategoryType]struct{}{} collections = []data.BackupCollection{} - allExcludes = map[string]map[string]struct{}{} + ssmb = prefixmatcher.NewStringSetBuilder() ) // for each scope that includes oneDrive items, get all @@ -74,7 +75,7 @@ func DataCollections( su, ctrlOpts) - odcs, excludes, err := nc.Get(ctx, metadata, errs) + odcs, err := nc.Get(ctx, metadata, ssmb, errs) if err != nil { el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) } @@ -82,19 +83,25 @@ func DataCollections( categories[scope.Category().PathType()] = struct{}{} collections = append(collections, odcs...) - - for k, ex := range excludes { - if _, ok := allExcludes[k]; !ok { - allExcludes[k] = map[string]struct{}{} - } - - maps.Copy(allExcludes[k], ex) - } } + mcs, err := migrationCollections( + service, + lastBackupVersion, + tenant, + user, + su, + ctrlOpts) + if err != nil { + return nil, nil, err + } + + collections = append(collections, mcs...) + if len(collections) > 0 { baseCols, err := graph.BaseCollections( ctx, + collections, tenant, user.ID(), path.OneDriveService, @@ -108,5 +115,51 @@ func DataCollections( collections = append(collections, baseCols...) } - return collections, allExcludes, el.Failure() + return collections, ssmb.ToReader(), el.Failure() +} + +// adds data migrations to the collection set. +func migrationCollections( + svc graph.Servicer, + lastBackupVersion int, + tenant string, + user idname.Provider, + su support.StatusUpdater, + ctrlOpts control.Options, +) ([]data.BackupCollection, error) { + // assume a version < 0 implies no prior backup, thus nothing to migrate. 
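+ // Likewise, a prior backup at or above version.All8MigrateUserPNToID has
+ // already switched the path prefix from the owner's principal name to the
+ // owner's ID, so no migration collection is needed.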
+ if version.IsNoBackup(lastBackupVersion) { + return nil, nil + } + + if lastBackupVersion >= version.All8MigrateUserPNToID { + return nil, nil + } + + // unlike exchange, which enumerates all folders on every + // backup, onedrive needs to force the owner PN -> ID migration + mc, err := path.ServicePrefix( + tenant, + user.ID(), + path.OneDriveService, + path.FilesCategory) + if err != nil { + return nil, clues.Wrap(err, "creating user id migration path") + } + + mpc, err := path.ServicePrefix( + tenant, + user.Name(), + path.OneDriveService, + path.FilesCategory) + if err != nil { + return nil, clues.Wrap(err, "creating user name migration path") + } + + mgn, err := graph.NewPrefixCollection(mpc, mc, su) + if err != nil { + return nil, clues.Wrap(err, "creating migration collection") + } + + return []data.BackupCollection{mgn}, nil } diff --git a/src/internal/connector/onedrive/data_collections_test.go b/src/internal/connector/onedrive/data_collections_test.go new file mode 100644 index 000000000..e71fbf4ff --- /dev/null +++ b/src/internal/connector/onedrive/data_collections_test.go @@ -0,0 +1,121 @@ +package onedrive + +import ( + "strings" + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" +) + +type DataCollectionsUnitSuite struct { + tester.Suite +} + +func TestDataCollectionsUnitSuite(t *testing.T) { + suite.Run(t, &DataCollectionsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *DataCollectionsUnitSuite) TestMigrationCollections() { + u := selectors.Selector{} + u = u.SetDiscreteOwnerIDName("i", "n") + + od := path.OneDriveService.String() + fc := path.FilesCategory.String() + + type migr struct { + full string + prev string + } + + table := []struct { + name string + version int + forceSkip bool + expectLen int + expectMigration []migr + }{ + { + name: "no backup version", + version: version.NoBackup, + forceSkip: false, + expectLen: 0, + expectMigration: []migr{}, + }, + { + name: "above current version", + version: version.Backup + 5, + forceSkip: false, + expectLen: 0, + expectMigration: []migr{}, + }, + { + name: "user pn to id", + version: version.All8MigrateUserPNToID - 1, + forceSkip: false, + expectLen: 1, + expectMigration: []migr{ + { + full: strings.Join([]string{"t", od, "i", fc}, "/"), + prev: strings.Join([]string{"t", od, "n", fc}, "/"), + }, + }, + }, + { + name: "skipped", + version: version.Backup + 5, + forceSkip: true, + expectLen: 0, + expectMigration: []migr{}, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + opts := control.Options{ + ToggleFeatures: control.Toggles{}, + } + + mc, err := migrationCollections(nil, test.version, "t", u, nil, opts) + require.NoError(t, err, clues.ToCore(err)) + + if test.expectLen == 0 { + assert.Nil(t, mc) + return + } + + assert.Len(t, mc, test.expectLen) + + migrs := []migr{} + + for _, col := range mc { + var fp, pp string + + if col.FullPath() != nil { + fp = col.FullPath().String() + } + + if col.PreviousPath() != nil { + pp = col.PreviousPath().String() + } + + t.Logf("Found migration collection:\n* full: %s\n* prev: %s\n", fp, pp) + + migrs = append(migrs, test.expectMigration...) 
+ } + + for i, m := range migrs { + assert.Contains(t, migrs, m, "expected to find migration: %+v", test.expectMigration[i]) + } + }) + } +} diff --git a/src/internal/connector/onedrive/drive.go b/src/internal/connector/onedrive/drive.go index fd0c8859a..77460a504 100644 --- a/src/internal/connector/onedrive/drive.go +++ b/src/internal/connector/onedrive/drive.go @@ -6,20 +6,20 @@ import ( "strings" "github.com/alcionai/clues" - "github.com/microsoftgraph/msgraph-sdk-go/drive" + "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" gapi "github.com/alcionai/corso/src/internal/connector/graph/api" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) -var errFolderNotFound = clues.New("folder not found") - const ( maxDrivesRetries = 3 @@ -27,7 +27,6 @@ const ( // graph response nextLinkKey = "@odata.nextLink" itemChildrenRawURLFmt = "https://graph.microsoft.com/v1.0/drives/%s/items/%s/children" - itemByPathRawURLFmt = "https://graph.microsoft.com/v1.0/drives/%s/items/%s:/%s" itemNotFoundErrorCode = "itemNotFound" ) @@ -58,6 +57,25 @@ func PagerForSource( } } +type pathPrefixerFunc func(driveID string) (path.Path, error) + +func pathPrefixerForSource( + tenantID, resourceOwner string, + source driveSource, +) pathPrefixerFunc { + cat := path.FilesCategory + serv := path.OneDriveService + + if source == SharePointSource { + cat = path.LibrariesCategory + serv = path.SharePointService + } + + return func(driveID string) (path.Path, error) { + return path.Build(tenantID, resourceOwner, serv, cat, false, odConsts.DrivesPathDir, driveID, odConsts.RootPathDir) + } +} + // itemCollector functions collect the items found in a drive type itemCollector func( ctx context.Context, @@ -140,7 +158,8 @@ func collectItems( } for { - page, err := pager.GetPage(ctx) + // assume delta urls here, which allows single-token consumption + page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) if graph.IsErrInvalidDelta(err) { logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) @@ -195,42 +214,6 @@ func collectItems( return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil } -// getFolder will lookup the specified folder name under `parentFolderID` -func getFolder( - ctx context.Context, - service graph.Servicer, - driveID, parentFolderID, folderName string, -) (models.DriveItemable, error) { - // The `Children().Get()` API doesn't yet support $filter, so using that to find a folder - // will be sub-optimal. 
- // Instead, we leverage OneDrive path-based addressing - - // https://learn.microsoft.com/en-us/graph/onedrive-addressing-driveitems#path-based-addressing - // - which allows us to lookup an item by its path relative to the parent ID - rawURL := fmt.Sprintf(itemByPathRawURLFmt, driveID, parentFolderID, folderName) - builder := drive.NewItemsDriveItemItemRequestBuilder(rawURL, service.Adapter()) - - var ( - foundItem models.DriveItemable - err error - ) - - foundItem, err = builder.Get(ctx, nil) - if err != nil { - if graph.IsErrDeletedInFlight(err) { - return nil, graph.Stack(ctx, clues.Stack(errFolderNotFound, err)) - } - - return nil, graph.Wrap(ctx, err, "getting folder") - } - - // Check if the item found is a folder, fail the call if not - if foundItem.GetFolder() == nil { - return nil, graph.Stack(ctx, errFolderNotFound) - } - - return foundItem, nil -} - // Create a new item in the specified folder func CreateItem( ctx context.Context, @@ -241,7 +224,7 @@ func CreateItem( // Graph SDK doesn't yet provide a POST method for `/children` so we set the `rawUrl` ourselves as recommended // here: https://github.com/microsoftgraph/msgraph-sdk-go/issues/155#issuecomment-1136254310 rawURL := fmt.Sprintf(itemChildrenRawURLFmt, driveID, parentFolderID) - builder := drive.NewItemsRequestBuilder(rawURL, service.Adapter()) + builder := drives.NewItemItemsRequestBuilder(rawURL, service.Adapter()) newItem, err := builder.Post(ctx, newItem, nil) if err != nil { @@ -283,7 +266,7 @@ func GetAllFolders( prefix string, errs *fault.Bus, ) ([]*Displayable, error) { - drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries) + drvs, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries) if err != nil { return nil, clues.Wrap(err, "getting OneDrive folders") } @@ -293,7 +276,7 @@ func GetAllFolders( el = errs.Local() ) - for _, d := range drives { + for _, d := range drvs { if el.Failure() != nil { break } @@ -375,7 +358,12 @@ func DeleteItem( driveID string, itemID string, ) error { - err := gs.Client().DrivesById(driveID).ItemsById(itemID).Delete(ctx, nil) + err := gs.Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(itemID). 
+ Delete(ctx, nil) if err != nil { return graph.Wrap(ctx, err, "deleting item").With("item_id", itemID) } diff --git a/src/internal/connector/onedrive/drive_test.go b/src/internal/connector/onedrive/drive_test.go index 26f8c5c85..7bbf186a8 100644 --- a/src/internal/connector/onedrive/drive_test.go +++ b/src/internal/connector/onedrive/drive_test.go @@ -13,16 +13,19 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" - "github.com/alcionai/corso/src/internal/connector/onedrive/api/mock" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" + "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) // Unit tests @@ -40,10 +43,10 @@ const ( ) func odErr(code string) *odataerrors.ODataError { - odErr := &odataerrors.ODataError{} - merr := odataerrors.MainError{} + odErr := odataerrors.NewODataError() + merr := odataerrors.NewMainError() merr.SetCode(&code) - odErr.SetError(&merr) + odErr.SetError(merr) return odErr } @@ -277,32 +280,40 @@ func (suite *OneDriveUnitSuite) TestDrives() { // Integration tests -type OneDriveSuite struct { +type OneDriveIntgSuite struct { tester.Suite userID string + creds account.M365Config } -func TestOneDriveDriveSuite(t *testing.T) { - suite.Run(t, &OneDriveSuite{ +func TestOneDriveSuite(t *testing.T) { + suite.Run(t, &OneDriveIntgSuite{ Suite: tester.NewIntegrationSuite( t, - [][]string{tester.M365AcctCredEnvs}, - ), + [][]string{tester.M365AcctCredEnvs}), }) } -func (suite *OneDriveSuite) SetupSuite() { - suite.userID = tester.SecondaryM365UserID(suite.T()) +func (suite *OneDriveIntgSuite) SetupSuite() { + t := suite.T() + + suite.userID = tester.SecondaryM365UserID(t) + + acct := tester.NewM365Account(t) + creds, err := acct.M365Config() + require.NoError(t, err) + + suite.creds = creds } -func (suite *OneDriveSuite) TestCreateGetDeleteFolder() { +func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() { ctx, flush := tester.NewContext() defer flush() var ( t = suite.T() folderIDs = []string{} - folderName1 = "Corso_Folder_Test_" + common.FormatNow(common.SimpleTimeTesting) + folderName1 = "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting) folderElements = []string{folderName1} gs = loadTestService(t) ) @@ -330,15 +341,28 @@ func (suite *OneDriveSuite) TestCreateGetDeleteFolder() { } }() - folderID, err := CreateRestoreFolders(ctx, gs, driveID, folderElements) + rootFolder, err := api.GetDriveRoot(ctx, gs, driveID) + require.NoError(t, err, clues.ToCore(err)) + + restoreDir := path.Builder{}.Append(folderElements...) 
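+ // Describe the restore destination as a drive path and seed the restore
+ // caches with the drive's root folder ID before building the folder
+ // hierarchy beneath it.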
+ drivePath := path.DrivePath{ + DriveID: driveID, + Root: "root:", + Folders: folderElements, + } + + caches := NewRestoreCaches() + caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId()) + + folderID, err := createRestoreFolders(ctx, gs, &drivePath, restoreDir, caches) require.NoError(t, err, clues.ToCore(err)) folderIDs = append(folderIDs, folderID) - folderName2 := "Corso_Folder_Test_" + common.FormatNow(common.SimpleTimeTesting) - folderElements = append(folderElements, folderName2) + folderName2 := "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting) + restoreDir = restoreDir.Append(folderName2) - folderID, err = CreateRestoreFolders(ctx, gs, driveID, folderElements) + folderID, err = createRestoreFolders(ctx, gs, &drivePath, restoreDir, caches) require.NoError(t, err, clues.ToCore(err)) folderIDs = append(folderIDs, folderID) @@ -391,11 +415,11 @@ func (fm testFolderMatcher) IsAny() bool { return fm.scope.IsAny(selectors.OneDriveFolder) } -func (fm testFolderMatcher) Matches(path string) bool { - return fm.scope.Matches(selectors.OneDriveFolder, path) +func (fm testFolderMatcher) Matches(p string) bool { + return fm.scope.Matches(selectors.OneDriveFolder, p) } -func (suite *OneDriveSuite) TestOneDriveNewCollections() { +func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() { creds, err := tester.NewM365Account(suite.T()).M365Config() require.NoError(suite.T(), err, clues.ToCore(err)) @@ -426,7 +450,7 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() { ) colls := NewCollections( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), creds.AzureTenantID, test.user, OneDriveSource, @@ -437,10 +461,12 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() { ToggleFeatures: control.Toggles{}, }) - odcs, excludes, err := colls.Get(ctx, nil, fault.New(true)) + ssmb := prefixmatcher.NewStringSetBuilder() + + odcs, err := colls.Get(ctx, nil, ssmb, fault.New(true)) assert.NoError(t, err, clues.ToCore(err)) // Don't expect excludes as this isn't an incremental backup. 
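+ // Get no longer returns the exclude set; it accumulates excluded item
+ // prefixes in the ssmb builder instead, which should stay empty here.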
- assert.Empty(t, excludes) + assert.True(t, ssmb.Empty()) for _, entry := range odcs { assert.NotEmpty(t, entry.FullPath()) diff --git a/src/internal/connector/onedrive/folder_cache.go b/src/internal/connector/onedrive/folder_cache.go new file mode 100644 index 000000000..696d42819 --- /dev/null +++ b/src/internal/connector/onedrive/folder_cache.go @@ -0,0 +1,28 @@ +package onedrive + +import ( + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/pkg/path" +) + +// TODO: refactor to comply with graph/cache_container + +type folderCache struct { + cache map[string]models.DriveItemable +} + +func NewFolderCache() *folderCache { + return &folderCache{ + cache: map[string]models.DriveItemable{}, + } +} + +func (c *folderCache) get(loc *path.Builder) (models.DriveItemable, bool) { + mdi, ok := c.cache[loc.String()] + return mdi, ok +} + +func (c *folderCache) set(loc *path.Builder, mdi models.DriveItemable) { + c.cache[loc.String()] = mdi +} diff --git a/src/internal/connector/onedrive/item.go b/src/internal/connector/onedrive/item.go index 209cdce15..2c9046ebf 100644 --- a/src/internal/connector/onedrive/item.go +++ b/src/internal/connector/onedrive/item.go @@ -14,18 +14,17 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" - "github.com/alcionai/corso/src/internal/connector/uploadsession" - "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) -const ( - // downloadUrlKey is used to find the download URL in a - // DriveItem response - downloadURLKey = "@microsoft.graph.downloadUrl" -) +// downloadUrlKeys is used to find the download URL in a DriveItem response. 
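+// downloadItem probes the keys in order and uses the first one that holds a
+// usable URL, so responses carrying either annotation are handled.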
+var downloadURLKeys = []string{ + "@microsoft.graph.downloadUrl", + "@content.downloadUrl", +} // sharePointItemReader will return a io.ReadCloser for the specified item // It crafts this by querying M365 for a download URL for the item @@ -33,12 +32,12 @@ const ( // TODO: Add metadata fetching to SharePoint func sharePointItemReader( ctx context.Context, - hc *http.Client, + client graph.Requester, item models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) { - resp, err := downloadItem(ctx, hc, item) + resp, err := downloadItem(ctx, client, item) if err != nil { - return details.ItemInfo{}, nil, clues.Wrap(err, "downloading item") + return details.ItemInfo{}, nil, clues.Wrap(err, "sharepoint reader") } dii := details.ItemInfo{ @@ -74,18 +73,18 @@ func baseItemMetaReader( item models.DriveItemable, ) (io.ReadCloser, int, error) { var ( - perms []UserPermission + perms []metadata.Permission err error - meta = Metadata{FileName: ptr.Val(item.GetName())} + meta = metadata.Metadata{FileName: ptr.Val(item.GetName())} ) if item.GetShared() == nil { - meta.SharingMode = SharingModeInherited + meta.SharingMode = metadata.SharingModeInherited } else { - meta.SharingMode = SharingModeCustom + meta.SharingMode = metadata.SharingModeCustom } - if meta.SharingMode == SharingModeCustom { + if meta.SharingMode == metadata.SharingModeCustom { perms, err = driveItemPermissionInfo(ctx, service, driveID, ptr.Val(item.GetId())) if err != nil { return nil, 0, err @@ -107,7 +106,7 @@ func baseItemMetaReader( // and using a http client to initialize a reader func oneDriveItemReader( ctx context.Context, - hc *http.Client, + client graph.Requester, item models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) { var ( @@ -116,9 +115,9 @@ func oneDriveItemReader( ) if isFile { - resp, err := downloadItem(ctx, hc, item) + resp, err := downloadItem(ctx, client, item) if err != nil { - return details.ItemInfo{}, nil, clues.Wrap(err, "downloading item") + return details.ItemInfo{}, nil, clues.Wrap(err, "onedrive reader") } rc = resp.Body @@ -131,38 +130,35 @@ func oneDriveItemReader( return dii, rc, nil } -func downloadItem(ctx context.Context, hc *http.Client, item models.DriveItemable) (*http.Response, error) { - url, ok := item.GetAdditionalData()[downloadURLKey].(*string) - if !ok { +func downloadItem( + ctx context.Context, + client graph.Requester, + item models.DriveItemable, +) (*http.Response, error) { + var url string + + for _, key := range downloadURLKeys { + tmp, ok := item.GetAdditionalData()[key].(*string) + if ok { + url = ptr.Val(tmp) + break + } + } + + if len(url) == 0 { return nil, clues.New("extracting file url").With("item_id", ptr.Val(item.GetId())) } - req, err := http.NewRequest(http.MethodGet, *url, nil) + resp, err := client.Request(ctx, http.MethodGet, url, nil, nil) if err != nil { - return nil, graph.Wrap(ctx, err, "new item download request") - } - - //nolint:lll - // Decorate the traffic - // See https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic - req.Header.Set("User-Agent", "ISV|Alcion|Corso/"+version.Version) - - resp, err := hc.Do(req) - if err != nil { - cerr := graph.Wrap(ctx, err, "downloading item") - - if graph.IsMalware(err) { - cerr = cerr.Label(graph.LabelsMalware) - } - - return nil, cerr + return nil, err } if (resp.StatusCode / 100) == 2 { return resp, nil } - if graph.IsMalwareResp(context.Background(), resp) { + if 
graph.IsMalwareResp(ctx, resp) { return nil, clues.New("malware detected").Label(graph.LabelsMalware) } @@ -215,7 +211,7 @@ func driveItemPermissionInfo( service graph.Servicer, driveID string, itemID string, -) ([]UserPermission, error) { +) ([]metadata.Permission, error) { perm, err := api.GetItemPermission(ctx, service, driveID, itemID) if err != nil { return nil, err @@ -226,8 +222,8 @@ func driveItemPermissionInfo( return uperms, nil } -func filterUserPermissions(ctx context.Context, perms []models.Permissionable) []UserPermission { - up := []UserPermission{} +func filterUserPermissions(ctx context.Context, perms []models.Permissionable) []metadata.Permission { + up := []metadata.Permission{} for _, p := range perms { if p.GetGrantedToV2() == nil { @@ -236,33 +232,44 @@ func filterUserPermissions(ctx context.Context, perms []models.Permissionable) [ continue } - gv2 := p.GetGrantedToV2() - - // Below are the mapping from roles to "Advanced" permissions - // screen entries: - // - // owner - Full Control - // write - Design | Edit | Contribute (no difference in /permissions api) - // read - Read - // empty - Restricted View - roles := p.GetRoles() - - entityID := "" - if gv2.GetUser() != nil { - entityID = ptr.Val(gv2.GetUser().GetId()) - } else if gv2.GetGroup() != nil { - entityID = ptr.Val(gv2.GetGroup().GetId()) - } else { - // TODO Add application permissions when adding permissions for SharePoint + var ( + // Below are the mapping from roles to "Advanced" permissions + // screen entries: + // + // owner - Full Control + // write - Design | Edit | Contribute (no difference in /permissions api) + // read - Read + // empty - Restricted View + // + // helpful docs: // https://devblogs.microsoft.com/microsoft365dev/controlling-app-access-on-specific-sharepoint-site-collections/ - logm := logger.Ctx(ctx) - if gv2.GetApplication() != nil { - logm.With("application_id", ptr.Val(gv2.GetApplication().GetId())) - } - if gv2.GetDevice() != nil { - logm.With("application_id", ptr.Val(gv2.GetDevice().GetId())) - } - logm.Info("untracked permission") + roles = p.GetRoles() + gv2 = p.GetGrantedToV2() + entityID string + gv2t metadata.GV2Type + ) + + switch true { + case gv2.GetUser() != nil: + gv2t = metadata.GV2User + entityID = ptr.Val(gv2.GetUser().GetId()) + case gv2.GetSiteUser() != nil: + gv2t = metadata.GV2SiteUser + entityID = ptr.Val(gv2.GetSiteUser().GetId()) + case gv2.GetGroup() != nil: + gv2t = metadata.GV2Group + entityID = ptr.Val(gv2.GetGroup().GetId()) + case gv2.GetSiteGroup() != nil: + gv2t = metadata.GV2SiteGroup + entityID = ptr.Val(gv2.GetSiteGroup().GetId()) + case gv2.GetApplication() != nil: + gv2t = metadata.GV2App + entityID = ptr.Val(gv2.GetApplication().GetId()) + case gv2.GetDevice() != nil: + gv2t = metadata.GV2Device + entityID = ptr.Val(gv2.GetDevice().GetId()) + default: + logger.Ctx(ctx).Info("untracked permission") } // Technically GrantedToV2 can also contain devices, but the @@ -272,10 +279,11 @@ func filterUserPermissions(ctx context.Context, perms []models.Permissionable) [ continue } - up = append(up, UserPermission{ + up = append(up, metadata.Permission{ ID: ptr.Val(p.GetId()), Roles: roles, EntityID: entityID, + EntityType: gv2t, Expiration: p.GetExpirationDateTime(), }) } @@ -312,7 +320,7 @@ func sharePointItemInfo(di models.DriveItemable, itemSize int64) *details.ShareP } return &details.SharePointInfo{ - ItemType: details.OneDriveItem, + ItemType: details.SharePointLibrary, ItemName: ptr.Val(di.GetName()), Created: ptr.Val(di.GetCreatedDateTime()), 
Modified: ptr.Val(di.GetLastModifiedDateTime()), @@ -336,7 +344,13 @@ func driveItemWriter( session := drives.NewItemItemsItemCreateUploadSessionPostRequestBody() ctx = clues.Add(ctx, "upload_item_id", itemID) - r, err := service.Client().DrivesById(driveID).ItemsById(itemID).CreateUploadSession().Post(ctx, session, nil) + r, err := service.Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(itemID). + CreateUploadSession(). + Post(ctx, session, nil) if err != nil { return nil, graph.Wrap(ctx, err, "creating item upload session") } @@ -345,7 +359,7 @@ func driveItemWriter( url := ptr.Val(r.GetUploadUrl()) - return uploadsession.NewWriter(itemID, url, itemSize), nil + return graph.NewLargeItemWriter(itemID, url, itemSize), nil } // constructWebURL helper function for recreating the webURL diff --git a/src/internal/connector/onedrive/item_test.go b/src/internal/connector/onedrive/item_test.go index 89dbd4036..8cc4968fa 100644 --- a/src/internal/connector/onedrive/item_test.go +++ b/src/internal/connector/onedrive/item_test.go @@ -12,12 +12,13 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" + "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type ItemIntegrationSuite struct { @@ -112,7 +113,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { ) // Read data for the file - itemInfo, itemData, err := oneDriveItemReader(ctx, graph.HTTPClient(graph.NoTimeout()), driveItem) + itemInfo, itemData, err := oneDriveItemReader(ctx, graph.NewNoTimeoutHTTPWrapper(), driveItem) require.NoError(suite.T(), err, clues.ToCore(err)) require.NotNil(suite.T(), itemInfo.OneDrive) @@ -127,8 +128,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { } // TestItemWriter is an integration test for uploading data to OneDrive -// It creates a new `testfolder_ item and writes data to it +// It creates a new folder with a new item and writes data to it func (suite *ItemIntegrationSuite) TestItemWriter() { table := []struct { name string @@ -151,27 +151,23 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { t := suite.T() srv := suite.service - root, err := srv.Client().DrivesById(test.driveID).Root().Get(ctx, nil) + root, err := srv.Client().Drives().ByDriveId(test.driveID).Root().Get(ctx, nil) require.NoError(t, err, clues.ToCore(err)) - // Test Requirement 2: "Test Folder" should exist - folder, err := getFolder(ctx, srv, test.driveID, ptr.Val(root.GetId()), "Test Folder") - require.NoError(t, err, clues.ToCore(err)) - - newFolderName := "testfolder_" + common.FormatNow(common.SimpleTimeTesting) - t.Logf("Test will create folder %s", newFolderName) + newFolderName := tester.DefaultTestRestoreDestination("folder").ContainerName + t.Logf("creating folder %s", newFolderName) newFolder, err := CreateItem( ctx, srv, test.driveID, - ptr.Val(folder.GetId()), + ptr.Val(root.GetId()), newItem(newFolderName, true)) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, newFolder.GetId()) - newItemName := "testItem_" + common.FormatNow(common.SimpleTimeTesting) - t.Logf("Test will create item %s", 
newItemName) + newItemName := "testItem_" + dttm.FormatNow(dttm.SafeForTesting) + t.Logf("creating item %s", newItemName) newItem, err := CreateItem( ctx, @@ -184,8 +180,8 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { // HACK: Leveraging this to test getFolder behavior for a file. `getFolder()` on the // newly created item should fail because it's a file not a folder - _, err = getFolder(ctx, srv, test.driveID, ptr.Val(newFolder.GetId()), newItemName) - require.ErrorIs(t, err, errFolderNotFound, clues.ToCore(err)) + _, err = api.GetFolderByName(ctx, srv, test.driveID, ptr.Val(newFolder.GetId()), newItemName) + require.ErrorIs(t, err, api.ErrFolderNotFound, clues.ToCore(err)) // Initialize a 100KB mockDataProvider td, writeSize := mockDataReader(int64(100 * 1024)) @@ -233,50 +229,70 @@ func (suite *ItemIntegrationSuite) TestDriveGetFolder() { t := suite.T() srv := suite.service - root, err := srv.Client().DrivesById(test.driveID).Root().Get(ctx, nil) + root, err := srv.Client().Drives().ByDriveId(test.driveID).Root().Get(ctx, nil) require.NoError(t, err, clues.ToCore(err)) // Lookup a folder that doesn't exist - _, err = getFolder(ctx, srv, test.driveID, ptr.Val(root.GetId()), "FolderDoesNotExist") - require.ErrorIs(t, err, errFolderNotFound, clues.ToCore(err)) + _, err = api.GetFolderByName(ctx, srv, test.driveID, ptr.Val(root.GetId()), "FolderDoesNotExist") + require.ErrorIs(t, err, api.ErrFolderNotFound, clues.ToCore(err)) // Lookup a folder that does exist - _, err = getFolder(ctx, srv, test.driveID, ptr.Val(root.GetId()), "") + _, err = api.GetFolderByName(ctx, srv, test.driveID, ptr.Val(root.GetId()), "") require.NoError(t, err, clues.ToCore(err)) }) } } -func getPermsUperms(permID, userID, entity string, scopes []string) (models.Permissionable, UserPermission) { - identity := models.NewIdentity() - identity.SetId(&userID) - identity.SetAdditionalData(map[string]any{"email": &userID}) +func getPermsAndResourceOwnerPerms( + permID, resourceOwner string, + gv2t metadata.GV2Type, + scopes []string, +) (models.Permissionable, metadata.Permission) { + sharepointIdentitySet := models.NewSharePointIdentitySet() - sharepointIdentity := models.NewSharePointIdentitySet() + switch gv2t { + case metadata.GV2App, metadata.GV2Device, metadata.GV2Group, metadata.GV2User: + identity := models.NewIdentity() + identity.SetId(&resourceOwner) + identity.SetAdditionalData(map[string]any{"email": &resourceOwner}) - switch entity { - case "user": - sharepointIdentity.SetUser(identity) - case "group": - sharepointIdentity.SetGroup(identity) - case "application": - sharepointIdentity.SetApplication(identity) - case "device": - sharepointIdentity.SetDevice(identity) + switch gv2t { + case metadata.GV2User: + sharepointIdentitySet.SetUser(identity) + case metadata.GV2Group: + sharepointIdentitySet.SetGroup(identity) + case metadata.GV2App: + sharepointIdentitySet.SetApplication(identity) + case metadata.GV2Device: + sharepointIdentitySet.SetDevice(identity) + } + + case metadata.GV2SiteUser, metadata.GV2SiteGroup: + spIdentity := models.NewSharePointIdentity() + spIdentity.SetId(&resourceOwner) + spIdentity.SetAdditionalData(map[string]any{"email": &resourceOwner}) + + switch gv2t { + case metadata.GV2SiteUser: + sharepointIdentitySet.SetSiteUser(spIdentity) + case metadata.GV2SiteGroup: + sharepointIdentitySet.SetSiteGroup(spIdentity) + } } perm := models.NewPermission() perm.SetId(&permID) perm.SetRoles([]string{"read"}) - perm.SetGrantedToV2(sharepointIdentity) + 
perm.SetGrantedToV2(sharepointIdentitySet) - uperm := UserPermission{ - ID: permID, - Roles: []string{"read"}, - EntityID: userID, + ownersPerm := metadata.Permission{ + ID: permID, + Roles: []string{"read"}, + EntityID: resourceOwner, + EntityType: gv2t, } - return perm, uperm + return perm, ownersPerm } type ItemUnitTestSuite struct { @@ -287,74 +303,123 @@ func TestItemUnitTestSuite(t *testing.T) { suite.Run(t, &ItemUnitTestSuite{Suite: tester.NewUnitSuite(t)}) } -func (suite *ItemUnitTestSuite) TestOneDrivePermissionsFilter() { - permID := "fakePermId" - userID := "fakeuser@provider.com" - userID2 := "fakeuser2@provider.com" +func (suite *ItemUnitTestSuite) TestDrivePermissionsFilter() { + var ( + pID = "fakePermId" + uID = "fakeuser@provider.com" + uID2 = "fakeuser2@provider.com" + own = []string{"owner"} + r = []string{"read"} + rw = []string{"read", "write"} + ) - userOwnerPerm, userOwnerUperm := getPermsUperms(permID, userID, "user", []string{"owner"}) - userReadPerm, userReadUperm := getPermsUperms(permID, userID, "user", []string{"read"}) - userReadWritePerm, userReadWriteUperm := getPermsUperms(permID, userID2, "user", []string{"read", "write"}) + userOwnerPerm, userOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2User, own) + userReadPerm, userReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2User, r) + userReadWritePerm, userReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2User, rw) + siteUserOwnerPerm, siteUserOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteUser, own) + siteUserReadPerm, siteUserReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteUser, r) + siteUserReadWritePerm, siteUserReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2SiteUser, rw) - groupReadPerm, groupReadUperm := getPermsUperms(permID, userID, "group", []string{"read"}) - groupReadWritePerm, groupReadWriteUperm := getPermsUperms(permID, userID2, "group", []string{"read", "write"}) + groupReadPerm, groupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2Group, r) + groupReadWritePerm, groupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2Group, rw) + siteGroupReadPerm, siteGroupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteGroup, r) + siteGroupReadWritePerm, siteGroupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2SiteGroup, rw) - noPerm, _ := getPermsUperms(permID, userID, "user", []string{"read"}) + noPerm, _ := getPermsAndResourceOwnerPerms(pID, uID, "user", []string{"read"}) noPerm.SetGrantedToV2(nil) // eg: link shares cases := []struct { name string graphPermissions []models.Permissionable - parsedPermissions []UserPermission + parsedPermissions []metadata.Permission }{ { name: "no perms", graphPermissions: []models.Permissionable{}, - parsedPermissions: []UserPermission{}, + parsedPermissions: []metadata.Permission{}, }, { name: "no user bound to perms", graphPermissions: []models.Permissionable{noPerm}, - parsedPermissions: []UserPermission{}, + parsedPermissions: []metadata.Permission{}, }, // user { name: "user with read permissions", graphPermissions: []models.Permissionable{userReadPerm}, - parsedPermissions: []UserPermission{userReadUperm}, + parsedPermissions: []metadata.Permission{userReadROperm}, }, { name: "user with owner permissions", graphPermissions: []models.Permissionable{userOwnerPerm}, - parsedPermissions: []UserPermission{userOwnerUperm}, + parsedPermissions: 
[]metadata.Permission{userOwnerROperm}, }, { name: "user with read and write permissions", graphPermissions: []models.Permissionable{userReadWritePerm}, - parsedPermissions: []UserPermission{userReadWriteUperm}, + parsedPermissions: []metadata.Permission{userReadWriteROperm}, }, { name: "multiple users with separate permissions", graphPermissions: []models.Permissionable{userReadPerm, userReadWritePerm}, - parsedPermissions: []UserPermission{userReadUperm, userReadWriteUperm}, + parsedPermissions: []metadata.Permission{userReadROperm, userReadWriteROperm}, + }, + + // site-user + { + name: "site user with read permissions", + graphPermissions: []models.Permissionable{siteUserReadPerm}, + parsedPermissions: []metadata.Permission{siteUserReadROperm}, + }, + { + name: "site user with owner permissions", + graphPermissions: []models.Permissionable{siteUserOwnerPerm}, + parsedPermissions: []metadata.Permission{siteUserOwnerROperm}, + }, + { + name: "site user with read and write permissions", + graphPermissions: []models.Permissionable{siteUserReadWritePerm}, + parsedPermissions: []metadata.Permission{siteUserReadWriteROperm}, + }, + { + name: "multiple site users with separate permissions", + graphPermissions: []models.Permissionable{siteUserReadPerm, siteUserReadWritePerm}, + parsedPermissions: []metadata.Permission{siteUserReadROperm, siteUserReadWriteROperm}, }, // group { name: "group with read permissions", graphPermissions: []models.Permissionable{groupReadPerm}, - parsedPermissions: []UserPermission{groupReadUperm}, + parsedPermissions: []metadata.Permission{groupReadROperm}, }, { name: "group with read and write permissions", graphPermissions: []models.Permissionable{groupReadWritePerm}, - parsedPermissions: []UserPermission{groupReadWriteUperm}, + parsedPermissions: []metadata.Permission{groupReadWriteROperm}, }, { name: "multiple groups with separate permissions", graphPermissions: []models.Permissionable{groupReadPerm, groupReadWritePerm}, - parsedPermissions: []UserPermission{groupReadUperm, groupReadWriteUperm}, + parsedPermissions: []metadata.Permission{groupReadROperm, groupReadWriteROperm}, + }, + + // site-group + { + name: "site group with read permissions", + graphPermissions: []models.Permissionable{siteGroupReadPerm}, + parsedPermissions: []metadata.Permission{siteGroupReadROperm}, + }, + { + name: "site group with read and write permissions", + graphPermissions: []models.Permissionable{siteGroupReadWritePerm}, + parsedPermissions: []metadata.Permission{siteGroupReadWriteROperm}, + }, + { + name: "multiple site groups with separate permissions", + graphPermissions: []models.Permissionable{siteGroupReadPerm, siteGroupReadWritePerm}, + parsedPermissions: []metadata.Permission{siteGroupReadROperm, siteGroupReadWriteROperm}, }, } for _, tc := range cases { diff --git a/src/internal/connector/onedrive/metadata/metadata.go b/src/internal/connector/onedrive/metadata/metadata.go new file mode 100644 index 000000000..32fb33707 --- /dev/null +++ b/src/internal/connector/onedrive/metadata/metadata.go @@ -0,0 +1,31 @@ +package metadata + +import ( + "io" + "time" +) + +// ItemMeta contains metadata about the Item. It gets stored in a +// separate file in kopia +type Metadata struct { + FileName string `json:"filename,omitempty"` + // SharingMode denotes what the current mode of sharing is for the object. 
+ // - inherited: permissions same as parent permissions (no "shared" in delta) + // - custom: use Permissions to set correct permissions ("shared" has value in delta) + SharingMode SharingMode `json:"permissionMode,omitempty"` + Permissions []Permission `json:"permissions,omitempty"` +} + +type Item struct { + ID string + Data io.ReadCloser + Mod time.Time +} + +// Deleted implements an interface function. However, OneDrive items are marked +// as deleted by adding them to the exclude list so this can always return +// false. +func (i *Item) Deleted() bool { return false } +func (i *Item) UUID() string { return i.ID } +func (i *Item) ToReader() io.ReadCloser { return i.Data } +func (i *Item) ModTime() time.Time { return i.Mod } diff --git a/src/internal/connector/onedrive/metadata/permissions.go b/src/internal/connector/onedrive/metadata/permissions.go new file mode 100644 index 000000000..6f17b76c6 --- /dev/null +++ b/src/internal/connector/onedrive/metadata/permissions.go @@ -0,0 +1,102 @@ +package metadata + +import ( + "time" + + "golang.org/x/exp/slices" +) + +type SharingMode int + +const ( + SharingModeCustom = SharingMode(iota) + SharingModeInherited +) + +type GV2Type string + +const ( + GV2App GV2Type = "application" + GV2Device GV2Type = "device" + GV2Group GV2Type = "group" + GV2SiteUser GV2Type = "site_user" + GV2SiteGroup GV2Type = "site_group" + GV2User GV2Type = "user" +) + +// FilePermission is used to store permissions of a specific resource owner +// to a drive item. +type Permission struct { + ID string `json:"id,omitempty"` + Roles []string `json:"role,omitempty"` + Email string `json:"email,omitempty"` // DEPRECATED: Replaced with EntityID in newer backups + EntityID string `json:"entityId,omitempty"` // this is the resource owner's ID + EntityType GV2Type `json:"entityType,omitempty"` + Expiration *time.Time `json:"expiration,omitempty"` +} + +// isSamePermission checks equality of two UserPermission objects +func (p Permission) Equals(other Permission) bool { + // EntityID can be empty for older backups and Email can be empty + // for newer ones. It is not possible for both to be empty. Also, + // if EntityID/Email for one is not empty then the other will also + // have EntityID/Email as we backup permissions for all the + // parents and children when we have a change in permissions. + if p.EntityID != "" && p.EntityID != other.EntityID { + return false + } + + if p.Email != "" && p.Email != other.Email { + return false + } + + p1r := p.Roles + p2r := other.Roles + + slices.Sort(p1r) + slices.Sort(p2r) + + return slices.Equal(p1r, p2r) +} + +// DiffPermissions compares the before and after set, returning +// the permissions that were added and removed (in that order) +// in the after set. 
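+// Illustrative example: with before = [{read, userA}] and after =
+// [{read, userA}, {write, userB}], added contains only the userB permission
+// and removed is empty.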
+func DiffPermissions(before, after []Permission) ([]Permission, []Permission) { + var ( + added = []Permission{} + removed = []Permission{} + ) + + for _, cp := range after { + found := false + + for _, pp := range before { + if cp.Equals(pp) { + found = true + break + } + } + + if !found { + added = append(added, cp) + } + } + + for _, pp := range before { + found := false + + for _, cp := range after { + if cp.Equals(pp) { + found = true + break + } + } + + if !found { + removed = append(removed, pp) + } + } + + return added, removed +} diff --git a/src/internal/connector/onedrive/metadata/permissions_test.go b/src/internal/connector/onedrive/metadata/permissions_test.go new file mode 100644 index 000000000..046052f37 --- /dev/null +++ b/src/internal/connector/onedrive/metadata/permissions_test.go @@ -0,0 +1,149 @@ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" +) + +type PermissionsUnitTestSuite struct { + tester.Suite +} + +func TestPermissionsUnitTestSuite(t *testing.T) { + suite.Run(t, &PermissionsUnitTestSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *PermissionsUnitTestSuite) TestDiffPermissions() { + perm1 := Permission{ + ID: "id1", + Roles: []string{"read"}, + EntityID: "user-id1", + } + + perm2 := Permission{ + ID: "id2", + Roles: []string{"write"}, + EntityID: "user-id2", + } + + perm3 := Permission{ + ID: "id3", + Roles: []string{"write"}, + EntityID: "user-id3", + } + + // The following two permissions have same id and user but + // different roles, this is a valid scenario for permissions. + sameidperm1 := Permission{ + ID: "id0", + Roles: []string{"write"}, + EntityID: "user-id0", + } + sameidperm2 := Permission{ + ID: "id0", + Roles: []string{"read"}, + EntityID: "user-id0", + } + + emailperm1 := Permission{ + ID: "id1", + Roles: []string{"read"}, + Email: "email1@provider.com", + } + + emailperm2 := Permission{ + ID: "id1", + Roles: []string{"read"}, + Email: "email2@provider.com", + } + + table := []struct { + name string + before []Permission + after []Permission + added []Permission + removed []Permission + }{ + { + name: "single permission added", + before: []Permission{}, + after: []Permission{perm1}, + added: []Permission{perm1}, + removed: []Permission{}, + }, + { + name: "single permission removed", + before: []Permission{perm1}, + after: []Permission{}, + added: []Permission{}, + removed: []Permission{perm1}, + }, + { + name: "multiple permission added", + before: []Permission{}, + after: []Permission{perm1, perm2}, + added: []Permission{perm1, perm2}, + removed: []Permission{}, + }, + { + name: "single permission removed", + before: []Permission{perm1, perm2}, + after: []Permission{}, + added: []Permission{}, + removed: []Permission{perm1, perm2}, + }, + { + name: "extra permissions", + before: []Permission{perm1, perm2}, + after: []Permission{perm1, perm2, perm3}, + added: []Permission{perm3}, + removed: []Permission{}, + }, + { + name: "less permissions", + before: []Permission{perm1, perm2, perm3}, + after: []Permission{perm1, perm2}, + added: []Permission{}, + removed: []Permission{perm3}, + }, + { + name: "same id different role", + before: []Permission{sameidperm1}, + after: []Permission{sameidperm2}, + added: []Permission{sameidperm2}, + removed: []Permission{sameidperm1}, + }, + { + name: "email based extra permissions", + before: []Permission{emailperm1}, + after: []Permission{emailperm1, emailperm2}, + added: 
[]Permission{emailperm2}, + removed: []Permission{}, + }, + { + name: "email based less permissions", + before: []Permission{emailperm1, emailperm2}, + after: []Permission{emailperm1}, + added: []Permission{}, + removed: []Permission{emailperm2}, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + _, flush := tester.NewContext() + defer flush() + + t := suite.T() + + added, removed := DiffPermissions(test.before, test.after) + + assert.Equal(t, added, test.added, "added permissions") + assert.Equal(t, removed, test.removed, "removed permissions") + }) + } +} diff --git a/src/internal/connector/onedrive/permission.go b/src/internal/connector/onedrive/permission.go index d59461a00..0f1451bb8 100644 --- a/src/internal/connector/onedrive/permission.go +++ b/src/internal/connector/onedrive/permission.go @@ -2,11 +2,11 @@ package onedrive import ( "context" + "fmt" "github.com/alcionai/clues" - msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive" + "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" - "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" @@ -15,24 +15,25 @@ import ( "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) func getParentMetadata( parentPath path.Path, - metas map[string]Metadata, -) (Metadata, error) { - parentMeta, ok := metas[parentPath.String()] + parentDirToMeta map[string]metadata.Metadata, +) (metadata.Metadata, error) { + parentMeta, ok := parentDirToMeta[parentPath.String()] if !ok { - onedrivePath, err := path.ToOneDrivePath(parentPath) + drivePath, err := path.ToDrivePath(parentPath) if err != nil { - return Metadata{}, clues.Wrap(err, "invalid restore path") + return metadata.Metadata{}, clues.Wrap(err, "invalid restore path") } - if len(onedrivePath.Folders) != 0 { - return Metadata{}, clues.Wrap(err, "computing item permissions") + if len(drivePath.Folders) != 0 { + return metadata.Metadata{}, clues.Wrap(err, "computing item permissions") } - parentMeta = Metadata{} + parentMeta = metadata.Metadata{} } return parentMeta, nil @@ -42,35 +43,34 @@ func getCollectionMetadata( ctx context.Context, drivePath *path.DrivePath, dc data.RestoreCollection, - metas map[string]Metadata, + caches *restoreCaches, backupVersion int, restorePerms bool, -) (Metadata, error) { +) (metadata.Metadata, error) { if !restorePerms || backupVersion < version.OneDrive1DataAndMetaFiles { - return Metadata{}, nil + return metadata.Metadata{}, nil } var ( - err error - collectionPath = dc.FullPath() + err error + fullPath = dc.FullPath() ) if len(drivePath.Folders) == 0 { // No permissions for root folder - return Metadata{}, nil + return metadata.Metadata{}, nil } if backupVersion < version.OneDrive4DirIncludesPermissions { - colMeta, err := getParentMetadata(collectionPath, metas) + colMeta, err := getParentMetadata(fullPath, caches.ParentDirToMeta) if err != nil { - return Metadata{}, clues.Wrap(err, "collection metadata") + return metadata.Metadata{}, clues.Wrap(err, "collection metadata") } return colMeta, nil } - // Root folder doesn't have a metadata file associated with it. 
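+ // The root folder has no metadata file of its own; other collections derive
+ // their metadata file name from the last element of their folder path.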
- folders := collectionPath.Folders() + folders := fullPath.Folders() metaName := folders[len(folders)-1] + metadata.DirMetaFileSuffix if backupVersion >= version.OneDrive5DirMetaNoName { @@ -79,155 +79,57 @@ func getCollectionMetadata( meta, err := fetchAndReadMetadata(ctx, dc, metaName) if err != nil { - return Metadata{}, clues.Wrap(err, "collection metadata") + return metadata.Metadata{}, clues.Wrap(err, "collection metadata") } return meta, nil } -// createRestoreFoldersWithPermissions creates the restore folder hierarchy in -// the specified drive and returns the folder ID of the last folder entry in the -// hierarchy. Permissions are only applied to the last folder in the hierarchy. -// Passing nil for the permissions results in just creating the folder(s). -func createRestoreFoldersWithPermissions( - ctx context.Context, - creds account.M365Config, - service graph.Servicer, - drivePath *path.DrivePath, - restoreFolders []string, - folderPath path.Path, - folderMetadata Metadata, - folderMetas map[string]Metadata, - permissionIDMappings map[string]string, - restorePerms bool, -) (string, error) { - id, err := CreateRestoreFolders(ctx, service, drivePath.DriveID, restoreFolders) - if err != nil { - return "", err - } - - if len(drivePath.Folders) == 0 { - // No permissions for root folder - return id, nil - } - - if !restorePerms { - return id, nil - } - - err = RestorePermissions( - ctx, - creds, - service, - drivePath.DriveID, - id, - folderPath, - folderMetadata, - folderMetas, - permissionIDMappings) - - return id, err -} - -// isSamePermission checks equality of two UserPermission objects -func isSamePermission(p1, p2 UserPermission) bool { - // EntityID can be empty for older backups and Email can be empty - // for newer ones. It is not possible for both to be empty. Also, - // if EntityID/Email for one is not empty then the other will also - // have EntityID/Email as we backup permissions for all the - // parents and children when we have a change in permissions. - if p1.EntityID != "" && p1.EntityID != p2.EntityID { - return false - } - - if p1.Email != "" && p1.Email != p2.Email { - return false - } - - p1r := p1.Roles - p2r := p2.Roles - - slices.Sort(p1r) - slices.Sort(p2r) - - return slices.Equal(p1r, p2r) -} - -func diffPermissions(before, after []UserPermission) ([]UserPermission, []UserPermission) { - var ( - added = []UserPermission{} - removed = []UserPermission{} - ) - - for _, cp := range after { - found := false - - for _, pp := range before { - if isSamePermission(cp, pp) { - found = true - break - } - } - - if !found { - added = append(added, cp) - } - } - - for _, pp := range before { - found := false - - for _, cp := range after { - if isSamePermission(cp, pp) { - found = true - break - } - } - - if !found { - removed = append(removed, pp) - } - } - - return added, removed -} - // computeParentPermissions computes the parent permissions by -// traversing folderMetas and finding the first item with custom -// permissions. folderMetas is expected to have all the parent +// traversing parentMetas and finding the first item with custom +// permissions. parentMetas is expected to have all the parent // directory metas for this to work. 
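+// The walk climbs one directory at a time: it returns the first ancestor whose
+// SharingMode is custom, returns an empty Metadata once the drive root is
+// reached, and errors if an ancestor has no metadata entry.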
-func computeParentPermissions(itemPath path.Path, folderMetas map[string]Metadata) (Metadata, error) { +func computeParentPermissions( + ctx context.Context, + originDir path.Path, + // map parent dir -> parent's metadata + parentMetas map[string]metadata.Metadata, +) (metadata.Metadata, error) { var ( parent path.Path - meta Metadata + meta metadata.Metadata err error ok bool ) - parent = itemPath + parent = originDir for { parent, err = parent.Dir() if err != nil { - return Metadata{}, clues.New("getting parent") + return metadata.Metadata{}, clues.New("getting parent").WithClues(ctx) } - onedrivePath, err := path.ToOneDrivePath(parent) + fmt.Println("pd", parent) + + ictx := clues.Add(ctx, "parent_dir", parent) + + drivePath, err := path.ToDrivePath(parent) if err != nil { - return Metadata{}, clues.New("get parent path") + return metadata.Metadata{}, clues.New("transforming dir to drivePath").WithClues(ictx) } - if len(onedrivePath.Folders) == 0 { - return Metadata{}, nil + if len(drivePath.Folders) == 0 { + return metadata.Metadata{}, nil } - meta, ok = folderMetas[parent.String()] + meta, ok = parentMetas[parent.String()] if !ok { - return Metadata{}, clues.New("no parent meta") + return metadata.Metadata{}, clues.New("no metadata found for parent folder: " + parent.String()).WithClues(ictx) } - if meta.SharingMode == SharingModeCustom { + if meta.SharingMode == metadata.SharingModeCustom { return meta, nil } } @@ -241,38 +143,51 @@ func UpdatePermissions( service graph.Servicer, driveID string, itemID string, - permAdded, permRemoved []UserPermission, - permissionIDMappings map[string]string, + permAdded, permRemoved []metadata.Permission, + oldPermIDToNewID map[string]string, ) error { // The ordering of the operations is important here. We first // remove all the removed permissions and then add the added ones. for _, p := range permRemoved { + ictx := clues.Add( + ctx, + "permission_entity_type", p.EntityType, + "permission_entity_id", clues.Hide(p.EntityID)) + // deletes require unique http clients // https://github.com/alcionai/corso/issues/2707 // this is bad citizenship, and could end up consuming a lot of // system resources if servicers leak client connections (sockets, etc). a, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret) if err != nil { - return graph.Wrap(ctx, err, "creating delete client") + return graph.Wrap(ictx, err, "creating delete client") } - pid, ok := permissionIDMappings[p.ID] + pid, ok := oldPermIDToNewID[p.ID] if !ok { return clues.New("no new permission id").WithClues(ctx) } err = graph.NewService(a). Client(). - DrivesById(driveID). - ItemsById(itemID). - PermissionsById(pid). - Delete(ctx, nil) + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(itemID). + Permissions(). + ByPermissionId(pid). + Delete(graph.ConsumeNTokens(ictx, graph.PermissionsLC), nil) if err != nil { - return graph.Wrap(ctx, err, "removing permissions") + return graph.Wrap(ictx, err, "removing permissions") } } for _, p := range permAdded { + ictx := clues.Add( + ctx, + "permission_entity_type", p.EntityType, + "permission_entity_id", clues.Hide(p.EntityID)) + // We are not able to restore permissions when there are no // roles or for owner, this seems to be restriction in graph roles := []string{} @@ -283,11 +198,13 @@ func UpdatePermissions( } } - if len(roles) == 0 { + // TODO: sitegroup support. 
Currently errors with "One or more users could not be resolved", + // likely due to the site group entityID consisting of a single integer (ex: 4) + if len(roles) == 0 || p.EntityType == metadata.GV2SiteGroup { continue } - pbody := msdrive.NewItemsItemInvitePostRequestBody() + pbody := drives.NewItemItemsItemInvitePostRequestBody() pbody.SetRoles(roles) if p.Expiration != nil { @@ -295,14 +212,11 @@ func UpdatePermissions( pbody.SetExpirationDateTime(&expiry) } - si := false - pbody.SetSendInvitation(&si) - - rs := true - pbody.SetRequireSignIn(&rs) + pbody.SetSendInvitation(ptr.To(false)) + pbody.SetRequireSignIn(ptr.To(true)) rec := models.NewDriveRecipient() - if p.EntityID != "" { + if len(p.EntityID) > 0 { rec.SetObjectId(&p.EntityID) } else { // Previous versions used to only store email for a @@ -312,12 +226,12 @@ func UpdatePermissions( pbody.SetRecipients([]models.DriveRecipientable{rec}) - np, err := service.Client().DrivesById(driveID).ItemsById(itemID).Invite().Post(ctx, pbody, nil) + newPerm, err := api.PostItemPermissionUpdate(ictx, service, driveID, itemID, pbody) if err != nil { - return graph.Wrap(ctx, err, "setting permissions") + return clues.Stack(err) } - permissionIDMappings[p.ID] = ptr.Val(np.GetValue()[0].GetId()) + oldPermIDToNewID[p.ID] = ptr.Val(newPerm.GetValue()[0].GetId()) } return nil @@ -334,22 +248,29 @@ func RestorePermissions( driveID string, itemID string, itemPath path.Path, - meta Metadata, - folderMetas map[string]Metadata, - permissionIDMappings map[string]string, + current metadata.Metadata, + caches *restoreCaches, ) error { - if meta.SharingMode == SharingModeInherited { + if current.SharingMode == metadata.SharingModeInherited { return nil } ctx = clues.Add(ctx, "permission_item_id", itemID) - parentPermissions, err := computeParentPermissions(itemPath, folderMetas) + parents, err := computeParentPermissions(ctx, itemPath, caches.ParentDirToMeta) if err != nil { - return clues.Wrap(err, "parent permissions").WithClues(ctx) + return clues.Wrap(err, "parent permissions") } - permAdded, permRemoved := diffPermissions(parentPermissions.Permissions, meta.Permissions) + permAdded, permRemoved := metadata.DiffPermissions(parents.Permissions, current.Permissions) - return UpdatePermissions(ctx, creds, service, driveID, itemID, permAdded, permRemoved, permissionIDMappings) + return UpdatePermissions( + ctx, + creds, + service, + driveID, + itemID, + permAdded, + permRemoved, + caches.OldPermIDToNewID) } diff --git a/src/internal/connector/onedrive/permission_test.go b/src/internal/connector/onedrive/permission_test.go index 0483462b1..4e0fd1fd3 100644 --- a/src/internal/connector/onedrive/permission_test.go +++ b/src/internal/connector/onedrive/permission_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/path" ) @@ -21,28 +22,39 @@ func TestPermissionsUnitTestSuite(t *testing.T) { suite.Run(t, &PermissionsUnitTestSuite{Suite: tester.NewUnitSuite(t)}) } -func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions() { +func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions_oneDrive() { + runComputeParentPermissionsTest(suite, path.OneDriveService, path.FilesCategory, "user") +} + +func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions_sharePoint() { + runComputeParentPermissionsTest(suite, path.SharePointService, 
path.LibrariesCategory, "site") +} + +func runComputeParentPermissionsTest( + suite *PermissionsUnitTestSuite, + service path.ServiceType, + category path.CategoryType, + resourceOwner string, +) { entryPath := fmt.Sprintf(rootDrivePattern, "drive-id") + "/level0/level1/level2/entry" rootEntryPath := fmt.Sprintf(rootDrivePattern, "drive-id") + "/entry" entry, err := path.Build( "tenant", - "user", - path.OneDriveService, - path.FilesCategory, + resourceOwner, + service, + category, false, - strings.Split(entryPath, "/")..., - ) + strings.Split(entryPath, "/")...) require.NoError(suite.T(), err, "creating path") rootEntry, err := path.Build( "tenant", - "user", - path.OneDriveService, - path.FilesCategory, + resourceOwner, + service, + category, false, - strings.Split(rootEntryPath, "/")..., - ) + strings.Split(rootEntryPath, "/")...) require.NoError(suite.T(), err, "creating path") level2, err := entry.Dir() @@ -54,9 +66,9 @@ func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions() { level0, err := level1.Dir() require.NoError(suite.T(), err, "level0 path") - metadata := Metadata{ - SharingMode: SharingModeCustom, - Permissions: []UserPermission{ + md := metadata.Metadata{ + SharingMode: metadata.SharingModeCustom, + Permissions: []metadata.Permission{ { Roles: []string{"write"}, EntityID: "user-id", @@ -64,9 +76,9 @@ func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions() { }, } - metadata2 := Metadata{ - SharingMode: SharingModeCustom, - Permissions: []UserPermission{ + metadata2 := metadata.Metadata{ + SharingMode: metadata.SharingModeCustom, + Permissions: []metadata.Permission{ { Roles: []string{"read"}, EntityID: "user-id", @@ -74,52 +86,52 @@ func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions() { }, } - inherited := Metadata{ - SharingMode: SharingModeInherited, - Permissions: []UserPermission{}, + inherited := metadata.Metadata{ + SharingMode: metadata.SharingModeInherited, + Permissions: []metadata.Permission{}, } table := []struct { name string item path.Path - meta Metadata - parentPerms map[string]Metadata + meta metadata.Metadata + parentPerms map[string]metadata.Metadata }{ { name: "root level entry", item: rootEntry, - meta: Metadata{}, - parentPerms: map[string]Metadata{}, + meta: metadata.Metadata{}, + parentPerms: map[string]metadata.Metadata{}, }, { name: "root level directory", item: level0, - meta: Metadata{}, - parentPerms: map[string]Metadata{}, + meta: metadata.Metadata{}, + parentPerms: map[string]metadata.Metadata{}, }, { name: "direct parent perms", item: entry, - meta: metadata, - parentPerms: map[string]Metadata{ - level2.String(): metadata, + meta: md, + parentPerms: map[string]metadata.Metadata{ + level2.String(): md, }, }, { name: "top level parent perms", item: entry, - meta: metadata, - parentPerms: map[string]Metadata{ + meta: md, + parentPerms: map[string]metadata.Metadata{ level2.String(): inherited, level1.String(): inherited, - level0.String(): metadata, + level0.String(): md, }, }, { name: "all inherited", item: entry, - meta: Metadata{}, - parentPerms: map[string]Metadata{ + meta: metadata.Metadata{}, + parentPerms: map[string]metadata.Metadata{ level2.String(): inherited, level1.String(): inherited, level0.String(): inherited, @@ -128,10 +140,10 @@ func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions() { { name: "multiple custom permission", item: entry, - meta: metadata, - parentPerms: map[string]Metadata{ + meta: md, + parentPerms: map[string]metadata.Metadata{ level2.String(): inherited, 
- level1.String(): metadata, + level1.String(): md, level0.String(): metadata2, }, }, @@ -139,146 +151,15 @@ func (suite *PermissionsUnitTestSuite) TestComputeParentPermissions() { for _, test := range table { suite.Run(test.name, func() { - _, flush := tester.NewContext() + ctx, flush := tester.NewContext() defer flush() t := suite.T() - m, err := computeParentPermissions(test.item, test.parentPerms) + m, err := computeParentPermissions(ctx, test.item, test.parentPerms) require.NoError(t, err, "compute permissions") assert.Equal(t, m, test.meta) }) } } - -func (suite *PermissionsUnitTestSuite) TestDiffPermissions() { - perm1 := UserPermission{ - ID: "id1", - Roles: []string{"read"}, - EntityID: "user-id1", - } - - perm2 := UserPermission{ - ID: "id2", - Roles: []string{"write"}, - EntityID: "user-id2", - } - - perm3 := UserPermission{ - ID: "id3", - Roles: []string{"write"}, - EntityID: "user-id3", - } - - // The following two permissions have same id and user but - // different roles, this is a valid scenario for permissions. - sameidperm1 := UserPermission{ - ID: "id0", - Roles: []string{"write"}, - EntityID: "user-id0", - } - sameidperm2 := UserPermission{ - ID: "id0", - Roles: []string{"read"}, - EntityID: "user-id0", - } - - emailperm1 := UserPermission{ - ID: "id1", - Roles: []string{"read"}, - Email: "email1@provider.com", - } - - emailperm2 := UserPermission{ - ID: "id1", - Roles: []string{"read"}, - Email: "email2@provider.com", - } - - table := []struct { - name string - before []UserPermission - after []UserPermission - added []UserPermission - removed []UserPermission - }{ - { - name: "single permission added", - before: []UserPermission{}, - after: []UserPermission{perm1}, - added: []UserPermission{perm1}, - removed: []UserPermission{}, - }, - { - name: "single permission removed", - before: []UserPermission{perm1}, - after: []UserPermission{}, - added: []UserPermission{}, - removed: []UserPermission{perm1}, - }, - { - name: "multiple permission added", - before: []UserPermission{}, - after: []UserPermission{perm1, perm2}, - added: []UserPermission{perm1, perm2}, - removed: []UserPermission{}, - }, - { - name: "single permission removed", - before: []UserPermission{perm1, perm2}, - after: []UserPermission{}, - added: []UserPermission{}, - removed: []UserPermission{perm1, perm2}, - }, - { - name: "extra permissions", - before: []UserPermission{perm1, perm2}, - after: []UserPermission{perm1, perm2, perm3}, - added: []UserPermission{perm3}, - removed: []UserPermission{}, - }, - { - name: "less permissions", - before: []UserPermission{perm1, perm2, perm3}, - after: []UserPermission{perm1, perm2}, - added: []UserPermission{}, - removed: []UserPermission{perm3}, - }, - { - name: "same id different role", - before: []UserPermission{sameidperm1}, - after: []UserPermission{sameidperm2}, - added: []UserPermission{sameidperm2}, - removed: []UserPermission{sameidperm1}, - }, - { - name: "email based extra permissions", - before: []UserPermission{emailperm1}, - after: []UserPermission{emailperm1, emailperm2}, - added: []UserPermission{emailperm2}, - removed: []UserPermission{}, - }, - { - name: "email based less permissions", - before: []UserPermission{emailperm1, emailperm2}, - after: []UserPermission{emailperm1}, - added: []UserPermission{}, - removed: []UserPermission{emailperm2}, - }, - } - - for _, test := range table { - suite.Run(test.name, func() { - _, flush := tester.NewContext() - defer flush() - - t := suite.T() - - added, removed := diffPermissions(test.before, test.after) - 
- assert.Equal(t, added, test.added, "added permissions") - assert.Equal(t, removed, test.removed, "removed permissions") - }) - } -} diff --git a/src/internal/connector/onedrive/restore.go b/src/internal/connector/onedrive/restore.go index cf968bedd..755496420 100644 --- a/src/internal/connector/onedrive/restore.go +++ b/src/internal/connector/onedrive/restore.go @@ -25,6 +25,7 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // copyBufferSize is used for chunked upload @@ -32,6 +33,22 @@ import ( // https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession?view=graph-rest-1.0#best-practices const copyBufferSize = 5 * 1024 * 1024 +type restoreCaches struct { + Folders *folderCache + ParentDirToMeta map[string]metadata.Metadata + OldPermIDToNewID map[string]string + DriveIDToRootFolderID map[string]string +} + +func NewRestoreCaches() *restoreCaches { + return &restoreCaches{ + Folders: NewFolderCache(), + ParentDirToMeta: map[string]metadata.Metadata{}, + OldPermIDToNewID: map[string]string{}, + DriveIDToRootFolderID: map[string]string{}, + } +} + // RestoreCollections will restore the specified data collections into OneDrive func RestoreCollections( ctx context.Context, @@ -46,12 +63,8 @@ func RestoreCollections( ) (*support.ConnectorOperationStatus, error) { var ( restoreMetrics support.CollectionMetrics - metrics support.CollectionMetrics - folderMetas = map[string]Metadata{} - - // permissionIDMappings is used to map between old and new id - // of permissions as we restore them - permissionIDMappings = map[string]string{} + caches = NewRestoreCaches() + el = errs.Local() ) ctx = clues.Add( @@ -60,12 +73,8 @@ func RestoreCollections( "destination", dest.ContainerName) // Reorder collections so that the parents directories are created - // before the child directories - sort.Slice(dcs, func(i, j int) bool { - return dcs[i].FullPath().String() < dcs[j].FullPath().String() - }) - - el := errs.Local() + // before the child directories; a requirement for permissions. 
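The call on the next line is what enforces that ordering. Sorting collections by their full path string is enough because a path always compares lexicographically smaller than any of its extensions, so every parent directory lands ahead of its children. A small, self-contained illustration of the property (plain strings only; the SortRestoreCollections helper added later in this diff applies the same comparison to RestoreCollection full paths):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Each string stands in for a RestoreCollection.FullPath().String().
	dirs := []string{
		"tenant/onedrive/user/files/drives/d/root:/a/b/c",
		"tenant/onedrive/user/files/drives/d/root:/a",
		"tenant/onedrive/user/files/drives/d/root:/a/b",
	}

	// Lexicographic order puts each parent before its children, so folders
	// (and the permissions attached to them) can be restored top-down.
	sort.Strings(dirs)

	for _, d := range dirs {
		fmt.Println(d)
	}
	// Output:
	// tenant/onedrive/user/files/drives/d/root:/a
	// tenant/onedrive/user/files/drives/d/root:/a/b
	// tenant/onedrive/user/files/drives/d/root:/a/b/c
}
```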
+ data.SortRestoreCollections(dcs) // Iterate through the data collections and restore the contents of each for _, dc := range dcs { @@ -74,12 +83,14 @@ func RestoreCollections( } var ( - err error - ictx = clues.Add( + err error + metrics support.CollectionMetrics + ictx = clues.Add( ctx, - "resource_owner", clues.Hide(dc.FullPath().ResourceOwner()), "category", dc.FullPath().Category(), - "path", dc.FullPath()) + "destination", clues.Hide(dest.ContainerName), + "resource_owner", clues.Hide(dc.FullPath().ResourceOwner()), + "full_path", dc.FullPath()) ) metrics, err = RestoreCollection( @@ -88,8 +99,7 @@ func RestoreCollections( backupVersion, service, dc, - folderMetas, - permissionIDMappings, + caches, OneDriveSource, dest.ContainerName, deets, @@ -127,8 +137,7 @@ func RestoreCollection( backupVersion int, service graph.Servicer, dc data.RestoreCollection, - folderMetas map[string]Metadata, - permissionIDMappings map[string]string, + caches *restoreCaches, source driveSource, restoreContainerName string, deets *details.Builder, @@ -142,25 +151,33 @@ func RestoreCollection( el = errs.Local() ) - ctx, end := diagnostics.Span(ctx, "gc:oneDrive:restoreCollection", diagnostics.Label("path", directory)) + ctx, end := diagnostics.Span(ctx, "gc:drive:restoreCollection", diagnostics.Label("path", directory)) defer end() - drivePath, err := path.ToOneDrivePath(directory) + drivePath, err := path.ToDrivePath(directory) if err != nil { return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx) } + if _, ok := caches.DriveIDToRootFolderID[drivePath.DriveID]; !ok { + root, err := api.GetDriveRoot(ctx, service, drivePath.DriveID) + if err != nil { + return metrics, clues.Wrap(err, "getting drive root id") + } + + caches.DriveIDToRootFolderID[drivePath.DriveID] = ptr.Val(root.GetId()) + } + // Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy // from the backup under this the restore folder instead of root) - // i.e. Restore into `/root://` - - restoreFolderElements := []string{restoreContainerName} - restoreFolderElements = append(restoreFolderElements, drivePath.Folders...) + // i.e. Restore into `/` + // the drive into which this folder gets restored is tracked separately in drivePath. + restoreDir := path.Builder{}.Append(restoreContainerName).Append(drivePath.Folders...) 
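In other words, the destination is just the backed-up folder chain re-rooted under the restore container within the same drive. A rough illustration with plain strings (the container and folder names below are made up; the real code assembles this with path.Builder as in the line above):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Folder chain recorded in the backup, relative to the drive root
	// (what drivePath.Folders carries for this collection).
	backedUp := []string{"Documents", "Reports", "2023"}

	// Restore container name; the real value comes from dest.ContainerName,
	// and this particular spelling is only an example.
	container := "Corso_Restore_01-May-2023"

	// Same hierarchy, re-rooted under the container inside the same drive.
	restoreDir := strings.Join(append([]string{container}, backedUp...), "/")

	fmt.Println(restoreDir)
	// Output: Corso_Restore_01-May-2023/Documents/Reports/2023
}
```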
ctx = clues.Add( ctx, "directory", dc.FullPath().Folder(false), - "destination_elements", restoreFolderElements, + "restore_destination", restoreDir, "drive_id", drivePath.DriveID) trace.Log(ctx, "gc:oneDrive:restoreCollection", directory.String()) @@ -170,7 +187,7 @@ func RestoreCollection( ctx, drivePath, dc, - folderMetas, + caches, backupVersion, restorePerms) if err != nil { @@ -178,22 +195,21 @@ func RestoreCollection( } // Create restore folders and get the folder ID of the folder the data stream will be restored in - restoreFolderID, err := createRestoreFoldersWithPermissions( + restoreFolderID, err := CreateRestoreFolders( ctx, creds, service, drivePath, - restoreFolderElements, + restoreDir, dc.FullPath(), colMeta, - folderMetas, - permissionIDMappings, + caches, restorePerms) if err != nil { return metrics, clues.Wrap(err, "creating folders for restore") } - folderMetas[dc.FullPath().String()] = colMeta + caches.ParentDirToMeta[dc.FullPath().String()] = colMeta items := dc.Items(ctx, errs) for { @@ -210,14 +226,16 @@ func RestoreCollection( return metrics, nil } - itemPath, err := dc.FullPath().Append(itemData.UUID(), true) + ictx := clues.Add(ctx, "restore_item_id", itemData.UUID()) + + itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) if err != nil { - el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) + el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ictx)) continue } itemInfo, skipped, err := restoreItem( - ctx, + ictx, creds, dc, backupVersion, @@ -226,8 +244,7 @@ func RestoreCollection( drivePath, restoreFolderID, copyBuffer, - folderMetas, - permissionIDMappings, + caches, restorePerms, itemData, itemPath) @@ -244,7 +261,7 @@ func RestoreCollection( } if skipped { - logger.Ctx(ctx).With("item_path", itemPath).Debug("did not restore item") + logger.Ctx(ictx).With("item_path", itemPath).Debug("did not restore item") continue } @@ -255,7 +272,7 @@ func RestoreCollection( itemInfo) if err != nil { // Not critical enough to need to stop restore operation. 
- logger.CtxErr(ctx, err).Infow("adding restored item to details") + logger.CtxErr(ictx, err).Infow("adding restored item to details") } metrics.Successes++ @@ -277,13 +294,13 @@ func restoreItem( drivePath *path.DrivePath, restoreFolderID string, copyBuffer []byte, - folderMetas map[string]Metadata, - permissionIDMappings map[string]string, + caches *restoreCaches, restorePerms bool, itemData data.Stream, itemPath path.Path, ) (details.ItemInfo, bool, error) { itemUUID := itemData.UUID() + ctx = clues.Add(ctx, "item_id", itemUUID) if backupVersion < version.OneDrive1DataAndMetaFiles { itemInfo, err := restoreV0File( @@ -327,7 +344,7 @@ func restoreItem( } trimmedPath := strings.TrimSuffix(itemPath.String(), metadata.DirMetaFileSuffix) - folderMetas[trimmedPath] = meta + caches.ParentDirToMeta[trimmedPath] = meta return details.ItemInfo{}, true, nil } @@ -345,8 +362,7 @@ func restoreItem( restoreFolderID, copyBuffer, restorePerms, - folderMetas, - permissionIDMappings, + caches, itemPath, itemData) if err != nil { @@ -368,8 +384,7 @@ func restoreItem( restoreFolderID, copyBuffer, restorePerms, - folderMetas, - permissionIDMappings, + caches, itemPath, itemData) if err != nil { @@ -418,8 +433,7 @@ func restoreV1File( restoreFolderID string, copyBuffer []byte, restorePerms bool, - folderMetas map[string]Metadata, - permissionIDMappings map[string]string, + caches *restoreCaches, itemPath path.Path, itemData data.Stream, ) (details.ItemInfo, error) { @@ -460,8 +474,7 @@ func restoreV1File( itemID, itemPath, meta, - folderMetas, - permissionIDMappings) + caches) if err != nil { return details.ItemInfo{}, clues.Wrap(err, "restoring item permissions") } @@ -479,8 +492,7 @@ func restoreV6File( restoreFolderID string, copyBuffer []byte, restorePerms bool, - folderMetas map[string]Metadata, - permissionIDMappings map[string]string, + caches *restoreCaches, itemPath path.Path, itemData data.Stream, ) (details.ItemInfo, error) { @@ -494,6 +506,11 @@ func restoreV6File( return details.ItemInfo{}, clues.Wrap(err, "restoring file") } + ctx = clues.Add( + ctx, + "count_perms", len(meta.Permissions), + "restore_item_name", clues.Hide(meta.FileName)) + if err != nil { return details.ItemInfo{}, clues.Wrap(err, "deserializing item metadata") } @@ -532,8 +549,7 @@ func restoreV6File( itemID, itemPath, meta, - folderMetas, - permissionIDMappings) + caches) if err != nil { return details.ItemInfo{}, clues.Wrap(err, "restoring item permissions") } @@ -541,43 +557,113 @@ func restoreV6File( return itemInfo, nil } -// CreateRestoreFolders creates the restore folder hierarchy in the specified -// drive and returns the folder ID of the last folder entry in the hierarchy. +// CreateRestoreFolders creates the restore folder hierarchy in +// the specified drive and returns the folder ID of the last folder entry in the +// hierarchy. Permissions are only applied to the last folder in the hierarchy. +// Passing nil for the permissions results in just creating the folder(s). +// folderCache is mutated, as a side effect of populating the items. 
func CreateRestoreFolders( ctx context.Context, + creds account.M365Config, service graph.Servicer, - driveID string, - restoreFolders []string, + drivePath *path.DrivePath, + restoreDir *path.Builder, + folderPath path.Path, + folderMetadata metadata.Metadata, + caches *restoreCaches, + restorePerms bool, ) (string, error) { - driveRoot, err := service.Client().DrivesById(driveID).Root().Get(ctx, nil) + id, err := createRestoreFolders( + ctx, + service, + drivePath, + restoreDir, + caches) if err != nil { - return "", graph.Wrap(ctx, err, "getting drive root") + return "", err } - parentFolderID := ptr.Val(driveRoot.GetId()) - ctx = clues.Add(ctx, "drive_root_id", parentFolderID) + if len(drivePath.Folders) == 0 { + // No permissions for root folder + return id, nil + } - logger.Ctx(ctx).Debug("found drive root") + if !restorePerms { + return id, nil + } - for _, folder := range restoreFolders { - folderItem, err := getFolder(ctx, service, driveID, parentFolderID, folder) - if err == nil { - parentFolderID = ptr.Val(folderItem.GetId()) + err = RestorePermissions( + ctx, + creds, + service, + drivePath.DriveID, + id, + folderPath, + folderMetadata, + caches) + + return id, err +} + +// createRestoreFolders creates the restore folder hierarchy in the specified +// drive and returns the folder ID of the last folder entry in the hierarchy. +// folderCache is mutated, as a side effect of populating the items. +func createRestoreFolders( + ctx context.Context, + service graph.Servicer, + drivePath *path.DrivePath, + restoreDir *path.Builder, + caches *restoreCaches, +) (string, error) { + var ( + driveID = drivePath.DriveID + folders = restoreDir.Elements() + location = path.Builder{}.Append(driveID) + parentFolderID = caches.DriveIDToRootFolderID[drivePath.DriveID] + ) + + ctx = clues.Add( + ctx, + "drive_id", drivePath.DriveID, + "root_folder_id", parentFolderID) + + for _, folder := range folders { + location = location.Append(folder) + ictx := clues.Add( + ctx, + "creating_restore_folder", folder, + "restore_folder_location", location, + "parent_of_restore_folder", parentFolderID) + + if fl, ok := caches.Folders.get(location); ok { + parentFolderID = ptr.Val(fl.GetId()) + // folder was already created, move on to the child continue } - if !errors.Is(err, errFolderNotFound) { - return "", clues.Wrap(err, "folder not found").With("folder_id", folder).WithClues(ctx) + folderItem, err := api.GetFolderByName(ictx, service, driveID, parentFolderID, folder) + if err != nil && !errors.Is(err, api.ErrFolderNotFound) { + return "", clues.Wrap(err, "getting folder by display name") } - folderItem, err = CreateItem(ctx, service, driveID, parentFolderID, newItem(folder, true)) + // folder found, moving to next child + if err == nil { + parentFolderID = ptr.Val(folderItem.GetId()) + caches.Folders.set(location, folderItem) + + continue + } + + // create the folder if not found + folderItem, err = CreateItem(ictx, service, driveID, parentFolderID, newItem(folder, true)) if err != nil { return "", clues.Wrap(err, "creating folder") } parentFolderID = ptr.Val(folderItem.GetId()) + caches.Folders.set(location, folderItem) - logger.Ctx(ctx).Debugw("resolved restore destination", "dest_id", parentFolderID) + logger.Ctx(ictx).Debug("resolved restore destination") } return parentFolderID, nil @@ -596,10 +682,7 @@ func restoreData( ctx, end := diagnostics.Span(ctx, "gc:oneDrive:restoreItem", diagnostics.Label("item_uuid", itemData.UUID())) defer end() - ctx = clues.Add(ctx, "item_name", itemData.UUID()) - - 
itemName := itemData.UUID() - trace.Log(ctx, "gc:oneDrive:restoreItem", itemName) + trace.Log(ctx, "gc:oneDrive:restoreItem", itemData.UUID()) // Get the stream size (needed to create the upload session) ss, ok := itemData.(data.StreamSize) @@ -610,13 +693,13 @@ func restoreData( // Create Item newItem, err := CreateItem(ctx, service, driveID, parentFolderID, newItem(name, false)) if err != nil { - return "", details.ItemInfo{}, clues.Wrap(err, "creating item") + return "", details.ItemInfo{}, err } // Get a drive item writer w, err := driveItemWriter(ctx, service, driveID, ptr.Val(newItem.GetId()), ss.Size()) if err != nil { - return "", details.ItemInfo{}, clues.Wrap(err, "creating item writer") + return "", details.ItemInfo{}, err } iReader := itemData.ToReader() @@ -624,7 +707,7 @@ func restoreData( ctx, iReader, observe.ItemRestoreMsg, - clues.Hide(itemName), + clues.Hide(name), ss.Size()) go closer() @@ -651,11 +734,12 @@ func fetchAndReadMetadata( ctx context.Context, fetcher fileFetcher, metaName string, -) (Metadata, error) { +) (metadata.Metadata, error) { + ctx = clues.Add(ctx, "meta_file_name", metaName) + metaFile, err := fetcher.Fetch(ctx, metaName) if err != nil { - err = clues.Wrap(err, "getting item metadata").With("meta_file_name", metaName) - return Metadata{}, err + return metadata.Metadata{}, clues.Wrap(err, "getting item metadata") } metaReader := metaFile.ToReader() @@ -663,26 +747,25 @@ func fetchAndReadMetadata( meta, err := getMetadata(metaReader) if err != nil { - err = clues.Wrap(err, "deserializing item metadata").With("meta_file_name", metaName) - return Metadata{}, err + return metadata.Metadata{}, clues.Wrap(err, "deserializing item metadata") } return meta, nil } // getMetadata read and parses the metadata info for an item -func getMetadata(metar io.ReadCloser) (Metadata, error) { - var meta Metadata +func getMetadata(metar io.ReadCloser) (metadata.Metadata, error) { + var meta metadata.Metadata // `metar` will be nil for the top level container folder if metar != nil { metaraw, err := io.ReadAll(metar) if err != nil { - return Metadata{}, err + return metadata.Metadata{}, err } err = json.Unmarshal(metaraw, &meta) if err != nil { - return Metadata{}, err + return metadata.Metadata{}, err } } @@ -691,27 +774,62 @@ func getMetadata(metar io.ReadCloser) (Metadata, error) { // Augment restore path to add extra files(meta) needed for restore as // well as do any other ordering operations on the paths -func AugmentRestorePaths(backupVersion int, paths []path.Path) ([]path.Path, error) { - colPaths := map[string]path.Path{} +// +// Only accepts StoragePath/RestorePath pairs where the RestorePath is +// at least as long as the StoragePath. If the RestorePath is longer than the +// StoragePath then the first few (closest to the root) directories will use +// default permissions during restore. +func AugmentRestorePaths( + backupVersion int, + paths []path.RestorePaths, +) ([]path.RestorePaths, error) { + // Keyed by each value's StoragePath.String() which corresponds to the RepoRef + // of the directory. 
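To make that contract concrete, here is roughly what goes in and what comes out for the newest backup versions, mirroring the "restore path longer one folder" case in the unit tests later in this diff. The pair type below is a stand-in for path.RestorePaths, and the paths are abbreviated (the real ones carry the full tenant/onedrive/user/files/drives/... prefix):

```go
package main

import "fmt"

// pair is a stand-in for path.RestorePaths: where an item's bytes live in the
// backup (Storage) versus where it should land in the drive (Restore).
type pair struct {
	Storage string
	Restore string
}

func main() {
	// What the caller hands to AugmentRestorePaths.
	in := []pair{
		{Storage: "root:/folder-id/file.txt.data", Restore: "root:/corso_restore/folder"},
	}

	// What comes back at a backup version that stores directory metadata in
	// ".dirmeta" files: the directory's meta entry is added for the same
	// restore collection, and the final sort surfaces it before the data file.
	out := []pair{
		{Storage: "root:/folder-id/.dirmeta", Restore: "root:/corso_restore/folder"},
		{Storage: "root:/folder-id/file.txt.data", Restore: "root:/corso_restore/folder"},
	}

	fmt.Println(in)
	fmt.Println(out)
}
```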
+ colPaths := map[string]path.RestorePaths{} for _, p := range paths { + first := true + for { - np, err := p.Dir() + sp, err := p.StoragePath.Dir() if err != nil { return nil, err } - onedrivePath, err := path.ToOneDrivePath(np) + drivePath, err := path.ToDrivePath(sp) if err != nil { return nil, err } - if len(onedrivePath.Folders) == 0 { + if len(drivePath.Folders) == 0 { break } - colPaths[np.String()] = np - p = np + if len(p.RestorePath.Elements()) < len(sp.Elements()) { + return nil, clues.New("restorePath shorter than storagePath"). + With("restore_path", p.RestorePath, "storage_path", sp) + } + + rp := p.RestorePath + + // Make sure the RestorePath always points to the level of the current + // collection. We need to track if it's the first iteration because the + // RestorePath starts out at the collection level to begin with. + if !first { + rp, err = p.RestorePath.Dir() + if err != nil { + return nil, err + } + } + + paths := path.RestorePaths{ + StoragePath: sp, + RestorePath: rp, + } + + colPaths[sp.String()] = paths + p = paths + first = false } } @@ -724,32 +842,45 @@ func AugmentRestorePaths(backupVersion int, paths []path.Path) ([]path.Path, err // As of now look up metadata for parent directories from a // collection. for _, p := range colPaths { - el := p.Elements() + el := p.StoragePath.Elements() if backupVersion >= version.OneDrive6NameInMeta { - mPath, err := p.Append(".dirmeta", true) + mPath, err := p.StoragePath.AppendItem(".dirmeta") if err != nil { return nil, err } - paths = append(paths, mPath) + paths = append( + paths, + path.RestorePaths{StoragePath: mPath, RestorePath: p.RestorePath}) } else if backupVersion >= version.OneDrive4DirIncludesPermissions { - mPath, err := p.Append(el[len(el)-1]+".dirmeta", true) + mPath, err := p.StoragePath.AppendItem(el.Last() + ".dirmeta") if err != nil { return nil, err } - paths = append(paths, mPath) + paths = append( + paths, + path.RestorePaths{StoragePath: mPath, RestorePath: p.RestorePath}) } else if backupVersion >= version.OneDrive1DataAndMetaFiles { - pp, err := p.Dir() + pp, err := p.StoragePath.Dir() if err != nil { return nil, err } - mPath, err := pp.Append(el[len(el)-1]+".dirmeta", true) + + mPath, err := pp.AppendItem(el.Last() + ".dirmeta") if err != nil { return nil, err } - paths = append(paths, mPath) + + prp, err := p.RestorePath.Dir() + if err != nil { + return nil, err + } + + paths = append( + paths, + path.RestorePaths{StoragePath: mPath, RestorePath: prp}) } } @@ -757,8 +888,11 @@ func AugmentRestorePaths(backupVersion int, paths []path.Path) ([]path.Path, err // files. This is only a necessity for OneDrive as we are storing // metadata for files/folders in separate meta files and we the // data to be restored before we can restore the metadata. + // + // This sorting assumes stuff in the same StoragePath directory end up in the + // same RestorePath collection. 
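Why a plain lexicographic sort on StoragePath is sufficient in the statement that follows: a leading '.' compares lower than letters and digits, so within any one directory the ".dirmeta" entry sorts ahead of the data files beside it, which is the ordering the restore tests below assert. A tiny check of that property (illustrative only):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// StoragePath strings for one directory's entries, deliberately out of order.
	entries := []string{
		"root:/folder-id/file.txt.data",
		"root:/folder-id/.dirmeta",
	}

	// '.' (0x2E) sorts before letters and digits, so the metadata file
	// surfaces ahead of the data files that live in the same directory.
	sort.Strings(entries)

	fmt.Println(entries)
	// Output: [root:/folder-id/.dirmeta root:/folder-id/file.txt.data]
}
```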
sort.Slice(paths, func(i, j int) bool { - return paths[i].String() < paths[j].String() + return paths[i].StoragePath.String() < paths[j].StoragePath.String() }) return paths, nil diff --git a/src/internal/connector/onedrive/restore_test.go b/src/internal/connector/onedrive/restore_test.go index a05be92c1..c085d689f 100644 --- a/src/internal/connector/onedrive/restore_test.go +++ b/src/internal/connector/onedrive/restore_test.go @@ -24,7 +24,7 @@ func TestRestoreUnitSuite(t *testing.T) { func (suite *RestoreUnitSuite) TestAugmentRestorePaths() { // Adding a simple test here so that we can be sure that this // function gets updated whenever we add a new version. - require.LessOrEqual(suite.T(), version.Backup, version.OneDrive7LocationRef, "unsupported backup version") + require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version") table := []struct { name string @@ -172,20 +172,30 @@ func (suite *RestoreUnitSuite) TestAugmentRestorePaths() { base := "id/onedrive/user/files/drives/driveID/root:/" - inPaths := []path.Path{} + inPaths := []path.RestorePaths{} for _, ps := range test.input { p, err := path.FromDataLayerPath(base+ps, true) require.NoError(t, err, "creating path", clues.ToCore(err)) - inPaths = append(inPaths, p) + pd, err := p.Dir() + require.NoError(t, err, "creating collection path", clues.ToCore(err)) + + inPaths = append( + inPaths, + path.RestorePaths{StoragePath: p, RestorePath: pd}) } - outPaths := []path.Path{} + outPaths := []path.RestorePaths{} for _, ps := range test.output { p, err := path.FromDataLayerPath(base+ps, true) require.NoError(t, err, "creating path", clues.ToCore(err)) - outPaths = append(outPaths, p) + pd, err := p.Dir() + require.NoError(t, err, "creating collection path", clues.ToCore(err)) + + outPaths = append( + outPaths, + path.RestorePaths{StoragePath: p, RestorePath: pd}) } actual, err := AugmentRestorePaths(test.version, inPaths) @@ -197,3 +207,111 @@ func (suite *RestoreUnitSuite) TestAugmentRestorePaths() { }) } } + +// TestAugmentRestorePaths_DifferentRestorePath tests that RestorePath +// substitution works properly. Since it's only possible for future backup +// versions to need restore path substitution (i.e. due to storing folders by +// ID instead of name) this is only tested against the most recent backup +// version at the moment. +func (suite *RestoreUnitSuite) TestAugmentRestorePaths_DifferentRestorePath() { + // Adding a simple test here so that we can be sure that this + // function gets updated whenever we add a new version. 
+ require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version") + + type pathPair struct { + storage string + restore string + } + + table := []struct { + name string + version int + input []pathPair + output []pathPair + errCheck assert.ErrorAssertionFunc + }{ + { + name: "nested folders", + version: version.Backup, + input: []pathPair{ + {storage: "folder-id/file.txt.data", restore: "folder"}, + {storage: "folder-id/folder2-id/file.txt.data", restore: "folder/folder2"}, + }, + output: []pathPair{ + {storage: "folder-id/.dirmeta", restore: "folder"}, + {storage: "folder-id/file.txt.data", restore: "folder"}, + {storage: "folder-id/folder2-id/.dirmeta", restore: "folder/folder2"}, + {storage: "folder-id/folder2-id/file.txt.data", restore: "folder/folder2"}, + }, + errCheck: assert.NoError, + }, + { + name: "restore path longer one folder", + version: version.Backup, + input: []pathPair{ + {storage: "folder-id/file.txt.data", restore: "corso_restore/folder"}, + }, + output: []pathPair{ + {storage: "folder-id/.dirmeta", restore: "corso_restore/folder"}, + {storage: "folder-id/file.txt.data", restore: "corso_restore/folder"}, + }, + errCheck: assert.NoError, + }, + { + name: "restore path shorter one folder", + version: version.Backup, + input: []pathPair{ + {storage: "folder-id/file.txt.data", restore: ""}, + }, + errCheck: assert.Error, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + _, flush := tester.NewContext() + defer flush() + + base := "id/onedrive/user/files/drives/driveID/root:/" + + inPaths := []path.RestorePaths{} + for _, ps := range test.input { + p, err := path.FromDataLayerPath(base+ps.storage, true) + require.NoError(t, err, "creating path", clues.ToCore(err)) + + r, err := path.FromDataLayerPath(base+ps.restore, false) + require.NoError(t, err, "creating path", clues.ToCore(err)) + + inPaths = append( + inPaths, + path.RestorePaths{StoragePath: p, RestorePath: r}) + } + + outPaths := []path.RestorePaths{} + for _, ps := range test.output { + p, err := path.FromDataLayerPath(base+ps.storage, true) + require.NoError(t, err, "creating path", clues.ToCore(err)) + + r, err := path.FromDataLayerPath(base+ps.restore, false) + require.NoError(t, err, "creating path", clues.ToCore(err)) + + outPaths = append( + outPaths, + path.RestorePaths{StoragePath: p, RestorePath: r}) + } + + actual, err := AugmentRestorePaths(test.version, inPaths) + test.errCheck(t, err, "augmenting paths", clues.ToCore(err)) + + if err != nil { + return + } + + // Ordering of paths matter here as we need dirmeta files + // to show up before file in dir + assert.Equal(t, outPaths, actual, "augmented paths") + }) + } +} diff --git a/src/internal/connector/onedrive/service_test.go b/src/internal/connector/onedrive/service_test.go index 94aac53b3..9c27d7dde 100644 --- a/src/internal/connector/onedrive/service_test.go +++ b/src/internal/connector/onedrive/service_test.go @@ -23,6 +23,8 @@ func (ms *MockGraphService) Adapter() *msgraphsdk.GraphRequestAdapter { return nil } +var _ graph.Servicer = &oneDriveService{} + // TODO(ashmrtn): Merge with similar structs in graph and exchange packages. 
type oneDriveService struct { client msgraphsdk.GraphServiceClient diff --git a/src/internal/connector/sharepoint/api/pages.go b/src/internal/connector/sharepoint/api/pages.go index 32bcafef1..bd1c0879d 100644 --- a/src/internal/connector/sharepoint/api/pages.go +++ b/src/internal/connector/sharepoint/api/pages.go @@ -91,7 +91,7 @@ func GetSite(ctx context.Context, gs graph.Servicer, siteID string) (models.Site }, } - resp, err := gs.Client().SitesById(siteID).Get(ctx, options) + resp, err := gs.Client().Sites().BySiteId(siteID).Get(ctx, options) if err != nil { return nil, err } diff --git a/src/internal/connector/sharepoint/api/pages_test.go b/src/internal/connector/sharepoint/api/pages_test.go index 3e52402be..c56c3bc86 100644 --- a/src/internal/connector/sharepoint/api/pages_test.go +++ b/src/internal/connector/sharepoint/api/pages_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/connector/sharepoint" "github.com/alcionai/corso/src/internal/connector/sharepoint/api" spMock "github.com/alcionai/corso/src/internal/connector/sharepoint/mock" @@ -81,7 +80,7 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() { t := suite.T() - destName := "Corso_Restore_" + common.FormatNow(common.SimpleTimeTesting) + destName := tester.DefaultTestRestoreDestination("").ContainerName testName := "MockPage" // Create Test Page @@ -98,8 +97,7 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() { suite.service, pageData, suite.siteID, - destName, - ) + destName) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, info) diff --git a/src/internal/connector/sharepoint/collection_test.go b/src/internal/connector/sharepoint/collection_test.go index 33103acee..47169687f 100644 --- a/src/internal/connector/sharepoint/collection_test.go +++ b/src/internal/connector/sharepoint/collection_test.go @@ -12,9 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/connector/sharepoint/api" spMock "github.com/alcionai/corso/src/internal/connector/sharepoint/mock" "github.com/alcionai/corso/src/internal/connector/support" @@ -176,11 +174,13 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { // TestRestoreListCollection verifies Graph Restore API for the List Collection func (suite *SharePointCollectionSuite) TestListCollection_Restore() { + t := suite.T() + // https://github.com/microsoftgraph/msgraph-sdk-go/issues/490 + t.Skip("disabled until upstream issue with list restore is fixed.") + ctx, flush := tester.NewContext() defer flush() - t := suite.T() - service := createTestService(t, suite.creds) listing := spMock.ListDefault("Mock List") testName := "MockListing" @@ -194,7 +194,7 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() { info: sharePointListInfo(listing, int64(len(byteArray))), } - destName := "Corso_Restore_" + common.FormatNow(common.SimpleTimeTesting) + destName := tester.DefaultTestRestoreDestination("").ContainerName deets, err := restoreListItem(ctx, service, listData, suite.siteID, destName) assert.NoError(t, err, clues.ToCore(err)) @@ -202,7 +202,7 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() { // Clean-Up var ( - builder = 
service.Client().SitesById(suite.siteID).Lists() + builder = service.Client().Sites().BySiteId(suite.siteID).Lists() isFound bool deleteID string ) @@ -233,30 +233,3 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() { assert.NoError(t, err, clues.ToCore(err)) } } - -// TestRestoreLocation temporary test for greater restore operation -// TODO delete after full functionality tested in GraphConnector -func (suite *SharePointCollectionSuite) TestRestoreLocation() { - ctx, flush := tester.NewContext() - defer flush() - - t := suite.T() - - service := createTestService(t, suite.creds) - rootFolder := "General_" + common.FormatNow(common.SimpleTimeTesting) - folderID, err := createRestoreFolders(ctx, service, suite.siteID, []string{rootFolder}) - require.NoError(t, err, clues.ToCore(err)) - t.Log("FolderID: " + folderID) - - _, err = createRestoreFolders(ctx, service, suite.siteID, []string{rootFolder, "Tsao"}) - require.NoError(t, err, clues.ToCore(err)) - - // CleanUp - siteDrive, err := service.Client().SitesById(suite.siteID).Drive().Get(ctx, nil) - require.NoError(t, err, clues.ToCore(err)) - - driveID := ptr.Val(siteDrive.GetId()) - - err = onedrive.DeleteItem(ctx, service, driveID, folderID) - assert.NoError(t, err, clues.ToCore(err)) -} diff --git a/src/internal/connector/sharepoint/data_collections.go b/src/internal/connector/sharepoint/data_collections.go index 815f3b1bb..d2a626e49 100644 --- a/src/internal/connector/sharepoint/data_collections.go +++ b/src/internal/connector/sharepoint/data_collections.go @@ -2,10 +2,11 @@ package sharepoint import ( "context" - "net/http" "github.com/alcionai/clues" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/connector/sharepoint/api" @@ -29,24 +30,31 @@ type statusUpdater interface { // for the specified user func DataCollections( ctx context.Context, - itemClient *http.Client, + itemClient graph.Requester, selector selectors.Selector, + site idname.Provider, + metadata []data.RestoreCollection, creds account.M365Config, serv graph.Servicer, su statusUpdater, ctrlOpts control.Options, errs *fault.Bus, -) ([]data.BackupCollection, map[string]map[string]struct{}, error) { +) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) { b, err := selector.ToSharePointBackup() if err != nil { return nil, nil, clues.Wrap(err, "sharePointDataCollection: parsing selector") } + ctx = clues.Add( + ctx, + "site_id", clues.Hide(site.ID()), + "site_url", clues.Hide(site.Name())) + var ( el = errs.Local() - site = b.DiscreteOwner collections = []data.BackupCollection{} categories = map[path.CategoryType]struct{}{} + ssmb = prefixmatcher.NewStringSetBuilder() ) for _, scope := range b.Scopes() { @@ -78,12 +86,14 @@ func DataCollections( } case path.LibrariesCategory: - spcs, _, err = collectLibraries( + spcs, err = collectLibraries( ctx, itemClient, serv, creds.AzureTenantID, site, + metadata, + ssmb, scope, su, ctrlOpts, @@ -117,8 +127,9 @@ func DataCollections( if len(collections) > 0 { baseCols, err := graph.BaseCollections( ctx, + collections, creds.AzureTenantID, - site, + site.ID(), path.SharePointService, categories, su.UpdateStatus, @@ -130,25 +141,26 @@ func DataCollections( collections = append(collections, baseCols...) 
} - return collections, nil, el.Failure() + return collections, ssmb.ToReader(), el.Failure() } func collectLists( ctx context.Context, serv graph.Servicer, - tenantID, siteID string, + tenantID string, + site idname.Provider, updater statusUpdater, ctrlOpts control.Options, errs *fault.Bus, ) ([]data.BackupCollection, error) { - logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint List Collections") + logger.Ctx(ctx).Debug("Creating SharePoint List Collections") var ( el = errs.Local() spcs = make([]data.BackupCollection, 0) ) - lists, err := preFetchLists(ctx, serv, siteID) + lists, err := preFetchLists(ctx, serv, site.ID()) if err != nil { return nil, err } @@ -160,7 +172,7 @@ func collectLists( dir, err := path.Build( tenantID, - siteID, + site.ID(), path.SharePointService, path.ListsCategory, false, @@ -182,14 +194,17 @@ func collectLists( // all the drives associated with the site. func collectLibraries( ctx context.Context, - itemClient *http.Client, + itemClient graph.Requester, serv graph.Servicer, - tenantID, siteID string, + tenantID string, + site idname.Provider, + metadata []data.RestoreCollection, + ssmb *prefixmatcher.StringSetMatchBuilder, scope selectors.SharePointScope, updater statusUpdater, ctrlOpts control.Options, errs *fault.Bus, -) ([]data.BackupCollection, map[string]map[string]struct{}, error) { +) ([]data.BackupCollection, error) { logger.Ctx(ctx).Debug("creating SharePoint Library collections") var ( @@ -197,7 +212,7 @@ func collectLibraries( colls = onedrive.NewCollections( itemClient, tenantID, - siteID, + site.ID(), onedrive.SharePointSource, folderMatcher{scope}, serv, @@ -205,14 +220,12 @@ func collectLibraries( ctrlOpts) ) - // TODO(ashmrtn): Pass previous backup metadata when SharePoint supports delta - // token-based incrementals. 
- odcs, excludes, err := colls.Get(ctx, nil, errs) + odcs, err := colls.Get(ctx, metadata, ssmb, errs) if err != nil { - return nil, nil, graph.Wrap(ctx, err, "getting library") + return nil, graph.Wrap(ctx, err, "getting library") } - return append(collections, odcs...), excludes, nil + return append(collections, odcs...), nil } // collectPages constructs a sharepoint Collections struct and Get()s the associated @@ -221,7 +234,7 @@ func collectPages( ctx context.Context, creds account.M365Config, serv graph.Servicer, - siteID string, + site idname.Provider, updater statusUpdater, ctrlOpts control.Options, errs *fault.Bus, @@ -245,7 +258,7 @@ func collectPages( betaService := m365api.NewBetaService(adpt) - tuples, err := api.FetchPages(ctx, betaService, siteID) + tuples, err := api.FetchPages(ctx, betaService, site.ID()) if err != nil { return nil, err } @@ -257,7 +270,7 @@ func collectPages( dir, err := path.Build( creds.AzureTenantID, - siteID, + site.ID(), path.SharePointService, path.PagesCategory, false, diff --git a/src/internal/connector/sharepoint/data_collections_test.go b/src/internal/connector/sharepoint/data_collections_test.go index b7411e059..282c3dfa1 100644 --- a/src/internal/connector/sharepoint/data_collections_test.go +++ b/src/internal/connector/sharepoint/data_collections_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/tester" @@ -109,14 +110,14 @@ func (suite *SharePointLibrariesUnitSuite) TestUpdateCollections() { ) c := onedrive.NewCollections( - graph.HTTPClient(graph.NoTimeout()), + graph.NewNoTimeoutHTTPWrapper(), tenant, site, onedrive.SharePointSource, testFolderMatcher{test.scope}, &MockGraphService{}, nil, - control.Options{}) + control.Defaults()) c.CollectionMap = collMap @@ -194,9 +195,11 @@ func (suite *SharePointPagesSuite) TestCollectPages() { ctx, flush := tester.NewContext() defer flush() - t := suite.T() - siteID := tester.M365SiteID(t) - a := tester.NewM365Account(t) + var ( + t = suite.T() + siteID = tester.M365SiteID(t) + a = tester.NewM365Account(t) + ) account, err := a.M365Config() require.NoError(t, err, clues.ToCore(err)) @@ -205,7 +208,7 @@ func (suite *SharePointPagesSuite) TestCollectPages() { ctx, account, nil, - siteID, + mock.NewProvider(siteID, siteID), &MockGraphService{}, control.Defaults(), fault.New(true)) diff --git a/src/internal/connector/sharepoint/list.go b/src/internal/connector/sharepoint/list.go index fd8b42a06..0da99fcea 100644 --- a/src/internal/connector/sharepoint/list.go +++ b/src/internal/connector/sharepoint/list.go @@ -36,7 +36,7 @@ func preFetchLists( siteID string, ) ([]listTuple, error) { var ( - builder = gs.Client().SitesById(siteID).Lists() + builder = gs.Client().Sites().BySiteId(siteID).Lists() options = preFetchListOptions() listTuples = make([]listTuple, 0) ) @@ -128,7 +128,7 @@ func loadSiteLists( err error ) - entry, err = gs.Client().SitesById(siteID).ListsById(id).Get(ctx, nil) + entry, err = gs.Client().Sites().BySiteId(siteID).Lists().ByListId(id).Get(ctx, nil) if err != nil { el.AddRecoverable(graph.Wrap(ctx, err, "getting site list")) return @@ -195,7 +195,7 @@ func fetchListItems( errs *fault.Bus, ) ([]models.ListItemable, error) { var ( - prefix = gs.Client().SitesById(siteID).ListsById(listID) + prefix = 
gs.Client().Sites().BySiteId(siteID).Lists().ByListId(listID) builder = prefix.Items() itms = make([]models.ListItemable, 0) el = errs.Local() @@ -216,7 +216,7 @@ func fetchListItems( break } - newPrefix := prefix.ItemsById(ptr.Val(itm.GetId())) + newPrefix := prefix.Items().ByListItemId(ptr.Val(itm.GetId())) fields, err := newPrefix.Fields().Get(ctx, nil) if err != nil { @@ -252,7 +252,7 @@ func fetchColumns( cs := make([]models.ColumnDefinitionable, 0) if len(cTypeID) == 0 { - builder := gs.Client().SitesById(siteID).ListsById(listID).Columns() + builder := gs.Client().Sites().BySiteId(siteID).Lists().ByListId(listID).Columns() for { resp, err := builder.Get(ctx, nil) @@ -270,7 +270,14 @@ func fetchColumns( builder = sites.NewItemListsItemColumnsRequestBuilder(link, gs.Adapter()) } } else { - builder := gs.Client().SitesById(siteID).ListsById(listID).ContentTypesById(cTypeID).Columns() + builder := gs.Client(). + Sites(). + BySiteId(siteID). + Lists(). + ByListId(listID). + ContentTypes(). + ByContentTypeId(cTypeID). + Columns() for { resp, err := builder.Get(ctx, nil) @@ -307,7 +314,7 @@ func fetchContentTypes( var ( el = errs.Local() cTypes = make([]models.ContentTypeable, 0) - builder = gs.Client().SitesById(siteID).ListsById(listID).ContentTypes() + builder = gs.Client().Sites().BySiteId(siteID).Lists().ByListId(listID).ContentTypes() ) for { @@ -363,8 +370,15 @@ func fetchColumnLinks( siteID, listID, cTypeID string, ) ([]models.ColumnLinkable, error) { var ( - builder = gs.Client().SitesById(siteID).ListsById(listID).ContentTypesById(cTypeID).ColumnLinks() - links = make([]models.ColumnLinkable, 0) + builder = gs.Client(). + Sites(). + BySiteId(siteID). + Lists(). + ByListId(listID). + ContentTypes(). + ByContentTypeId(cTypeID). + ColumnLinks() + links = make([]models.ColumnLinkable, 0) ) for { @@ -396,7 +410,7 @@ func DeleteList( gs graph.Servicer, siteID, listID string, ) error { - err := gs.Client().SitesById(siteID).ListsById(listID).Delete(ctx, nil) + err := gs.Client().Sites().BySiteId(siteID).Lists().ByListId(listID).Delete(ctx, nil) if err != nil { return graph.Wrap(ctx, err, "deleting list") } diff --git a/src/internal/connector/sharepoint/restore.go b/src/internal/connector/sharepoint/restore.go index aa71b67a6..9a9a1bd49 100644 --- a/src/internal/connector/sharepoint/restore.go +++ b/src/internal/connector/sharepoint/restore.go @@ -2,6 +2,7 @@ package sharepoint import ( "context" + "errors" "fmt" "io" "runtime/trace" @@ -45,24 +46,36 @@ func RestoreCollections( creds account.M365Config, service graph.Servicer, dest control.RestoreDestination, + opts control.Options, dcs []data.RestoreCollection, deets *details.Builder, errs *fault.Bus, ) (*support.ConnectorOperationStatus, error) { var ( - err error restoreMetrics support.CollectionMetrics + caches = onedrive.NewRestoreCaches() + el = errs.Local() ) + // Reorder collections so that the parents directories are created + // before the child directories; a requirement for permissions. 
+ data.SortRestoreCollections(dcs) + // Iterate through the data collections and restore the contents of each for _, dc := range dcs { + if el.Failure() != nil { + break + } + var ( + err error category = dc.FullPath().Category() metrics support.CollectionMetrics ictx = clues.Add(ctx, "category", category, "destination", clues.Hide(dest.ContainerName), - "resource_owner", clues.Hide(dc.FullPath().ResourceOwner())) + "resource_owner", clues.Hide(dc.FullPath().ResourceOwner()), + "full_path", dc.FullPath()) ) switch dc.FullPath().Category() { @@ -73,13 +86,13 @@ func RestoreCollections( backupVersion, service, dc, - map[string]onedrive.Metadata{}, // Currently permission data is not stored for sharepoint - map[string]string{}, + caches, onedrive.SharePointSource, dest.ContainerName, deets, - false, + opts.RestorePermissions, errs) + case path.ListsCategory: metrics, err = RestoreListCollection( ictx, @@ -88,6 +101,7 @@ func RestoreCollections( dest.ContainerName, deets, errs) + case path.PagesCategory: metrics, err = RestorePageCollection( ictx, @@ -96,13 +110,18 @@ func RestoreCollections( dest.ContainerName, deets, errs) + default: - return nil, clues.Wrap(clues.New(category.String()), "category not supported") + return nil, clues.Wrap(clues.New(category.String()), "category not supported").With("category", category) } restoreMetrics = support.CombineMetrics(restoreMetrics, metrics) if err != nil { + el.AddRecoverable(err) + } + + if errors.Is(err, context.Canceled) { break } } @@ -114,28 +133,11 @@ func RestoreCollections( restoreMetrics, dest.ContainerName) - return status, err -} - -// createRestoreFolders creates the restore folder hierarchy in the specified drive and returns the folder ID -// of the last folder entry given in the hierarchy -func createRestoreFolders( - ctx context.Context, - service graph.Servicer, - siteID string, - restoreFolders []string, -) (string, error) { - // Get Main Drive for Site, Documents - mainDrive, err := service.Client().SitesById(siteID).Drive().Get(ctx, nil) - if err != nil { - return "", graph.Wrap(ctx, err, "getting site drive root") - } - - return onedrive.CreateRestoreFolders(ctx, service, ptr.Val(mainDrive.GetId()), restoreFolders) + return status, el.Failure() } // restoreListItem utility function restores a List to the siteID. -// The name is changed to to Corso_Restore_{timeStame}_name +// The name is changed to to {DestName}_{name} // API Reference: https://learn.microsoft.com/en-us/graph/api/list-create?view=graph-rest-1.0&tabs=http // Restored List can be verified within the Site contents. func restoreListItem( @@ -182,7 +184,7 @@ func restoreListItem( newList.SetItems(contents) // Restore to List base to M365 back store - restoredList, err := service.Client().SitesById(siteID).Lists().Post(ctx, newList, nil) + restoredList, err := service.Client().Sites().BySiteId(siteID).Lists().Post(ctx, newList, nil) if err != nil { return dii, graph.Wrap(ctx, err, "restoring list") } @@ -192,8 +194,10 @@ func restoreListItem( if len(contents) > 0 { for _, lItem := range contents { _, err := service.Client(). - SitesById(siteID). - ListsById(ptr.Val(restoredList.GetId())). + Sites(). + BySiteId(siteID). + Lists(). + ByListId(ptr.Val(restoredList.GetId())). Items(). 
Post(ctx, lItem, nil) if err != nil { @@ -257,7 +261,7 @@ func RestoreListCollection( metrics.Bytes += itemInfo.SharePoint.Size - itemPath, err := dc.FullPath().Append(itemData.UUID(), true) + itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) if err != nil { el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) continue @@ -345,7 +349,7 @@ func RestorePageCollection( metrics.Bytes += itemInfo.SharePoint.Size - itemPath, err := dc.FullPath().Append(itemData.UUID(), true) + itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) if err != nil { el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) continue diff --git a/src/internal/connector/support/m365Support_test.go b/src/internal/connector/support/m365Support_test.go index dbff1e37b..f35761781 100644 --- a/src/internal/connector/support/m365Support_test.go +++ b/src/internal/connector/support/m365Support_test.go @@ -213,7 +213,7 @@ func (suite *DataSupportSuite) TestCreatePageFromBytes() { pg.SetWebUrl(&title) writer := kioser.NewJsonSerializationWriter() - err := pg.Serialize(writer) + err := writer.WriteObjectValue("", pg) require.NoError(t, err, clues.ToCore(err)) byteArray, err := writer.GetSerializedContent() @@ -231,6 +231,11 @@ func (suite *DataSupportSuite) TestCreatePageFromBytes() { result, err := CreatePageFromBytes(test.getBytes(t)) test.checkError(t, err) test.isNil(t, result) + if result != nil { + assert.Equal(t, "Tested", *result.GetName(), "name") + assert.Equal(t, "Tested", *result.GetTitle(), "title") + assert.Equal(t, "Tested", *result.GetWebUrl(), "webURL") + } }) } } diff --git a/src/internal/data/data_collection.go b/src/internal/data/data_collection.go index d407a23a3..eef37a029 100644 --- a/src/internal/data/data_collection.go +++ b/src/internal/data/data_collection.go @@ -143,7 +143,7 @@ func StateOf(prev, curr path.Path) CollectionState { return NewState } - if curr.Folder(false) != prev.Folder(false) { + if curr.String() != prev.String() { return MovedState } diff --git a/src/internal/data/data_collection_test.go b/src/internal/data/data_collection_test.go index 5e7f8b175..fd0cb0020 100644 --- a/src/internal/data/data_collection_test.go +++ b/src/internal/data/data_collection_test.go @@ -25,6 +25,8 @@ func (suite *DataCollectionSuite) TestStateOf() { require.NoError(suite.T(), err, clues.ToCore(err)) barP, err := path.Build("t", "u", path.ExchangeService, path.EmailCategory, false, "bar") require.NoError(suite.T(), err, clues.ToCore(err)) + preP, err := path.Build("_t", "_u", path.ExchangeService, path.EmailCategory, false, "foo") + require.NoError(suite.T(), err, clues.ToCore(err)) table := []struct { name string @@ -49,6 +51,12 @@ func (suite *DataCollectionSuite) TestStateOf() { curr: barP, expect: MovedState, }, + { + name: "moved if prefix changes", + prev: fooP, + curr: preP, + expect: MovedState, + }, { name: "deleted", prev: fooP, diff --git a/src/internal/data/helpers.go b/src/internal/data/helpers.go new file mode 100644 index 000000000..9594ffdf9 --- /dev/null +++ b/src/internal/data/helpers.go @@ -0,0 +1,10 @@ +package data + +import "sort" + +// SortRestoreCollections performs an in-place sort on the provided collection. 
+func SortRestoreCollections(rcs []RestoreCollection) { + sort.Slice(rcs, func(i, j int) bool { + return rcs[i].FullPath().String() < rcs[j].FullPath().String() + }) +} diff --git a/src/internal/events/events.go b/src/internal/events/events.go index 47a15f5e9..a2f52dc54 100644 --- a/src/internal/events/events.go +++ b/src/internal/events/events.go @@ -28,13 +28,15 @@ const ( tenantIDDeprecated = "m365_tenant_hash_deprecated" // Event Keys - CorsoStart = "Corso Start" - RepoInit = "Repo Init" - RepoConnect = "Repo Connect" - BackupStart = "Backup Start" - BackupEnd = "Backup End" - RestoreStart = "Restore Start" - RestoreEnd = "Restore End" + CorsoStart = "Corso Start" + RepoInit = "Repo Init" + RepoConnect = "Repo Connect" + BackupStart = "Backup Start" + BackupEnd = "Backup End" + RestoreStart = "Restore Start" + RestoreEnd = "Restore End" + MaintenanceStart = "Maintenance Start" + MaintenanceEnd = "Maintenance End" // Event Data Keys BackupCreateTime = "backup_creation_time" @@ -188,10 +190,12 @@ func tenantHash(tenID string) string { // metrics aggregation // --------------------------------------------------------------------------- -type m string +type metricsCategory string // metrics collection bucket -const APICall m = "api_call" +const ( + APICall metricsCategory = "api_call" +) // configurations const ( @@ -256,13 +260,19 @@ func dumpMetrics(ctx context.Context, stop <-chan struct{}, sig *metrics.InmemSi } // Inc increments the given category by 1. -func Inc(cat m, keys ...string) { +func Inc(cat metricsCategory, keys ...string) { cats := append([]string{string(cat)}, keys...) metrics.IncrCounter(cats, 1) } +// IncN increments the given category by N. +func IncN(n int, cat metricsCategory, keys ...string) { + cats := append([]string{string(cat)}, keys...) + metrics.IncrCounter(cats, float32(n)) +} + // Since records the duration between the provided time and now, in millis. -func Since(start time.Time, cat m, keys ...string) { +func Since(start time.Time, cat metricsCategory, keys ...string) { cats := append([]string{string(cat)}, keys...) 
metrics.MeasureSince(cats, start) } diff --git a/src/internal/events/events_test.go b/src/internal/events/events_test.go index 46363a695..3d44690e9 100644 --- a/src/internal/events/events_test.go +++ b/src/internal/events/events_test.go @@ -52,7 +52,7 @@ func (suite *EventsIntegrationSuite) TestNewBus() { ) require.NoError(t, err, clues.ToCore(err)) - b, err := events.NewBus(ctx, s, a.ID(), control.Options{}) + b, err := events.NewBus(ctx, s, a.ID(), control.Defaults()) require.NotEmpty(t, b) require.NoError(t, err, clues.ToCore(err)) diff --git a/src/internal/kopia/conn.go b/src/internal/kopia/conn.go index cabb1a555..09d4a34ff 100644 --- a/src/internal/kopia/conn.go +++ b/src/internal/kopia/conn.go @@ -18,6 +18,7 @@ import ( "github.com/kopia/kopia/snapshot/snapshotfs" "github.com/pkg/errors" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/storage" ) @@ -69,7 +70,7 @@ func NewConn(s storage.Storage) *conn { } } -func (w *conn) Initialize(ctx context.Context) error { +func (w *conn) Initialize(ctx context.Context, opts repository.Options) error { bst, err := blobStoreByProvider(ctx, w.storage) if err != nil { return clues.Wrap(err, "initializing storage") @@ -92,6 +93,7 @@ func (w *conn) Initialize(ctx context.Context) error { err = w.commonConnect( ctx, + opts, cfg.KopiaCfgDir, bst, cfg.CorsoPassphrase, @@ -108,7 +110,7 @@ func (w *conn) Initialize(ctx context.Context) error { return nil } -func (w *conn) Connect(ctx context.Context) error { +func (w *conn) Connect(ctx context.Context, opts repository.Options) error { bst, err := blobStoreByProvider(ctx, w.storage) if err != nil { return clues.Wrap(err, "initializing storage") @@ -122,6 +124,7 @@ func (w *conn) Connect(ctx context.Context) error { return w.commonConnect( ctx, + opts, cfg.KopiaCfgDir, bst, cfg.CorsoPassphrase, @@ -131,16 +134,21 @@ func (w *conn) Connect(ctx context.Context) error { func (w *conn) commonConnect( ctx context.Context, + opts repository.Options, configDir string, bst blob.Storage, password, compressor string, ) error { - var opts *repo.ConnectOptions + kopiaOpts := &repo.ConnectOptions{ + ClientOptions: repo.ClientOptions{ + Username: opts.User, + Hostname: opts.Host, + }, + } + if len(configDir) > 0 { - opts = &repo.ConnectOptions{ - CachingOptions: content.CachingOptions{ - CacheDirectory: configDir, - }, + kopiaOpts.CachingOptions = content.CachingOptions{ + CacheDirectory: configDir, } } else { configDir = defaultKopiaConfigDir @@ -154,7 +162,7 @@ func (w *conn) commonConnect( cfgFile, bst, password, - opts, + kopiaOpts, ); err != nil { return clues.Wrap(err, "connecting to repo").WithClues(ctx) } diff --git a/src/internal/kopia/conn_test.go b/src/internal/kopia/conn_test.go index 16d2bd943..fd619f6da 100644 --- a/src/internal/kopia/conn_test.go +++ b/src/internal/kopia/conn_test.go @@ -14,16 +14,18 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/storage" ) -//revive:disable:context-as-argument -func openKopiaRepo(t *testing.T, ctx context.Context) (*conn, error) { - //revive:enable:context-as-argument +func openKopiaRepo( + t *testing.T, + ctx context.Context, //revive:disable-line:context-as-argument +) (*conn, error) { st := tester.NewPrefixedS3Storage(t) k := NewConn(st) - if err := k.Initialize(ctx); err != nil { + if err := k.Initialize(ctx, repository.Options{}); err != nil { return nil, err } @@ -77,13 +79,13 @@ 
func (suite *WrapperIntegrationSuite) TestRepoExistsError() { st := tester.NewPrefixedS3Storage(t) k := NewConn(st) - err := k.Initialize(ctx) + err := k.Initialize(ctx, repository.Options{}) require.NoError(t, err, clues.ToCore(err)) err = k.Close(ctx) require.NoError(t, err, clues.ToCore(err)) - err = k.Initialize(ctx) + err = k.Initialize(ctx, repository.Options{}) assert.Error(t, err, clues.ToCore(err)) assert.ErrorIs(t, err, ErrorRepoAlreadyExists) } @@ -97,7 +99,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() { st.Provider = storage.ProviderUnknown k := NewConn(st) - err := k.Initialize(ctx) + err := k.Initialize(ctx, repository.Options{}) assert.Error(t, err, clues.ToCore(err)) } @@ -109,7 +111,7 @@ func (suite *WrapperIntegrationSuite) TestConnectWithoutInitErrors() { st := tester.NewPrefixedS3Storage(t) k := NewConn(st) - err := k.Connect(ctx) + err := k.Connect(ctx, repository.Options{}) assert.Error(t, err, clues.ToCore(err)) } @@ -356,7 +358,7 @@ func (suite *WrapperIntegrationSuite) TestConfigDefaultsSetOnInitAndNotOnConnect err = k.Close(ctx) require.NoError(t, err, clues.ToCore(err)) - err = k.Connect(ctx) + err = k.Connect(ctx, repository.Options{}) require.NoError(t, err, clues.ToCore(err)) defer func() { @@ -384,9 +386,63 @@ func (suite *WrapperIntegrationSuite) TestInitAndConnWithTempDirectory() { require.NoError(t, err, clues.ToCore(err)) // Re-open with Connect. - err = k.Connect(ctx) + err = k.Connect(ctx, repository.Options{}) require.NoError(t, err, clues.ToCore(err)) err = k.Close(ctx) assert.NoError(t, err, clues.ToCore(err)) } + +func (suite *WrapperIntegrationSuite) TestSetUserAndHost() { + ctx, flush := tester.NewContext() + defer flush() + + opts := repository.Options{ + User: "foo", + Host: "bar", + } + + t := suite.T() + st := tester.NewPrefixedS3Storage(t) + k := NewConn(st) + + err := k.Initialize(ctx, opts) + require.NoError(t, err, clues.ToCore(err)) + + kopiaOpts := k.ClientOptions() + require.Equal(t, opts.User, kopiaOpts.Username) + require.Equal(t, opts.Host, kopiaOpts.Hostname) + + err = k.Close(ctx) + require.NoError(t, err, clues.ToCore(err)) + + // Re-open with Connect and a different user/hostname. + opts.User = "hello" + opts.Host = "world" + + err = k.Connect(ctx, opts) + require.NoError(t, err, clues.ToCore(err)) + + kopiaOpts = k.ClientOptions() + require.Equal(t, opts.User, kopiaOpts.Username) + require.Equal(t, opts.Host, kopiaOpts.Hostname) + + err = k.Close(ctx) + require.NoError(t, err, clues.ToCore(err)) + + // Make sure not setting the values uses the kopia defaults. 
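The new TestSetUserAndHost below exercises the plumbing added to commonConnect earlier in this patch: a caller-supplied user and host are forwarded to kopia's client options, and empty values leave kopia free to apply its own defaults. A minimal sketch of that mapping, assuming the repo and control/repository imports already used by conn.go:

// Hedged sketch; mirrors commonConnect in conn.go.
opts := repository.Options{User: "foo", Host: "bar"}

kopiaOpts := &repo.ConnectOptions{
	ClientOptions: repo.ClientOptions{
		Username: opts.User, // empty string lets kopia pick its default
		Hostname: opts.Host, // empty string lets kopia pick its default
	},
}
_ = kopiaOpts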
+ opts.User = "" + opts.Host = "" + + err = k.Connect(ctx, opts) + require.NoError(t, err, clues.ToCore(err)) + + kopiaOpts = k.ClientOptions() + assert.NotEmpty(t, kopiaOpts.Username) + assert.NotEqual(t, "hello", kopiaOpts.Username) + assert.NotEmpty(t, kopiaOpts.Hostname) + assert.NotEqual(t, "world", kopiaOpts.Hostname) + + err = k.Close(ctx) + assert.NoError(t, err, clues.ToCore(err)) +} diff --git a/src/internal/kopia/data_collection.go b/src/internal/kopia/data_collection.go index 9c2ebf5c7..b77da148f 100644 --- a/src/internal/kopia/data_collection.go +++ b/src/internal/kopia/data_collection.go @@ -9,6 +9,7 @@ import ( "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -18,10 +19,25 @@ var ( ) type kopiaDataCollection struct { - path path.Path - streams []data.Stream - snapshotRoot fs.Entry - counter ByteCounter + path path.Path + streams []data.Stream + dir fs.Directory + counter ByteCounter + expectedVersion uint32 +} + +func (kdc *kopiaDataCollection) addStream( + ctx context.Context, + name string, +) error { + s, err := kdc.Fetch(ctx, name) + if err != nil { + return err + } + + kdc.streams = append(kdc.streams, s) + + return nil } func (kdc *kopiaDataCollection) Items( @@ -45,23 +61,61 @@ func (kdc kopiaDataCollection) FullPath() path.Path { return kdc.path } +// Fetch returns the file with the given name from the collection as a +// data.Stream. Returns a data.ErrNotFound error if the file isn't in the +// collection. func (kdc kopiaDataCollection) Fetch( ctx context.Context, name string, ) (data.Stream, error) { - if kdc.snapshotRoot == nil { - return nil, clues.New("no snapshot root") + ctx = clues.Add(ctx, "item_name", clues.Hide(name)) + + if kdc.dir == nil { + return nil, clues.New("no snapshot directory") } - p, err := kdc.FullPath().Append(name, true) + if len(name) == 0 { + return nil, clues.Wrap(errNoRestorePath, "unknown item").WithClues(ctx) + } + + e, err := kdc.dir.Child(ctx, encodeAsPath(name)) if err != nil { - return nil, clues.Wrap(err, "creating item path") + if isErrEntryNotFound(err) { + err = clues.Stack(data.ErrNotFound, err) + } + + return nil, clues.Wrap(err, "getting item").WithClues(ctx) } - // TODO(ashmrtn): We could possibly hold a reference to the folder this - // collection corresponds to, but that requires larger changes for the - // creation of these collections. - return getItemStream(ctx, p, kdc.snapshotRoot, kdc.counter) + f, ok := e.(fs.File) + if !ok { + return nil, clues.New("object is not a file").WithClues(ctx) + } + + size := f.Size() - int64(versionSize) + if size < 0 { + logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size) + + size = 0 + } + + if kdc.counter != nil { + kdc.counter.Count(size) + } + + r, err := f.Open(ctx) + if err != nil { + return nil, clues.Wrap(err, "opening file").WithClues(ctx) + } + + return &kopiaDataStream{ + uuid: name, + reader: &restoreStreamReader{ + ReadCloser: r, + expectedVersion: kdc.expectedVersion, + }, + size: size, + }, nil } type kopiaDataStream struct { diff --git a/src/internal/kopia/data_collection_test.go b/src/internal/kopia/data_collection_test.go index 8ae52157f..8684ebd05 100644 --- a/src/internal/kopia/data_collection_test.go +++ b/src/internal/kopia/data_collection_test.go @@ -20,6 +20,48 @@ import ( "github.com/alcionai/corso/src/pkg/path" ) +// --------------- +// Wrappers to match required interfaces. 
+// --------------- + +// These types are needed because we check that a fs.File was returned. +// Unfortunately fs.StreamingFile and fs.File have different interfaces so we +// have to fake things. +type mockSeeker struct{} + +func (s mockSeeker) Seek(offset int64, whence int) (int64, error) { + return 0, clues.New("not implemented") +} + +type mockReader struct { + io.ReadCloser + mockSeeker +} + +func (r mockReader) Entry() (fs.Entry, error) { + return nil, clues.New("not implemented") +} + +type mockFile struct { + // Use for Entry interface. + fs.StreamingFile + r io.ReadCloser + openErr error + size int64 +} + +func (f *mockFile) Open(ctx context.Context) (fs.Reader, error) { + if f.openErr != nil { + return nil, f.openErr + } + + return mockReader{ReadCloser: f.r}, nil +} + +func (f *mockFile) Size() int64 { + return f.size +} + // --------------- // unit tests // --------------- @@ -44,52 +86,127 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsPath() { require.NoError(t, err, clues.ToCore(err)) c := kopiaDataCollection{ - streams: []data.Stream{}, - path: pth, + path: pth, } assert.Equal(t, pth, c.FullPath()) } func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() { - testData := [][]byte{ - []byte("abcdefghijklmnopqrstuvwxyz"), - []byte("zyxwvutsrqponmlkjihgfedcba"), + type loadedData struct { + uuid string + data []byte + size int64 } - uuids := []string{ - "a-file", - "another-file", + var ( + fileData = [][]byte{ + []byte("abcdefghijklmnopqrstuvwxyz"), + []byte("zyxwvutsrqponmlkjihgfedcba"), + } + + uuids = []string{ + "a-file", + "another-file", + } + + files = []loadedData{ + {uuid: uuids[0], data: fileData[0], size: int64(len(fileData[0]))}, + {uuid: uuids[1], data: fileData[1], size: int64(len(fileData[1]))}, + } + + fileLookupErrName = "errLookup" + fileOpenErrName = "errOpen" + notFileErrName = "errNotFile" + ) + + // Needs to be a function so the readers get refreshed each time. + getLayout := func() fs.Directory { + return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{ + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(files[0].uuid), + nil, + ), + r: newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(files[0].data)), + ), + size: int64(len(files[0].data) + versionSize), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(files[1].uuid), + nil, + ), + r: newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(files[1].data)), + ), + size: int64(len(files[1].data) + versionSize), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(fileOpenErrName), + nil, + ), + openErr: assert.AnError, + }, + virtualfs.NewStaticDirectory(encodeAsPath(notFileErrName), []fs.Entry{}), + }) } table := []struct { - name string - streams []data.Stream + name string + uuidsAndErrors map[string]assert.ErrorAssertionFunc + // Data and stuff about the loaded data. 
+ expectedLoaded []loadedData }{ { name: "SingleStream", - streams: []data.Stream{ - &kopiaDataStream{ - reader: io.NopCloser(bytes.NewReader(testData[0])), - uuid: uuids[0], - size: int64(len(testData[0])), - }, + uuidsAndErrors: map[string]assert.ErrorAssertionFunc{ + uuids[0]: assert.NoError, }, + expectedLoaded: []loadedData{files[0]}, }, { name: "MultipleStreams", - streams: []data.Stream{ - &kopiaDataStream{ - reader: io.NopCloser(bytes.NewReader(testData[0])), - uuid: uuids[0], - size: int64(len(testData[0])), - }, - &kopiaDataStream{ - reader: io.NopCloser(bytes.NewReader(testData[1])), - uuid: uuids[1], - size: int64(len(testData[1])), - }, + uuidsAndErrors: map[string]assert.ErrorAssertionFunc{ + uuids[0]: assert.NoError, + uuids[1]: assert.NoError, }, + expectedLoaded: files, + }, + { + name: "Some Not Found Errors", + uuidsAndErrors: map[string]assert.ErrorAssertionFunc{ + fileLookupErrName: assert.Error, + uuids[0]: assert.NoError, + }, + expectedLoaded: []loadedData{files[0]}, + }, + { + name: "Some Not A File Errors", + uuidsAndErrors: map[string]assert.ErrorAssertionFunc{ + notFileErrName: assert.Error, + uuids[0]: assert.NoError, + }, + expectedLoaded: []loadedData{files[0]}, + }, + { + name: "Some Open Errors", + uuidsAndErrors: map[string]assert.ErrorAssertionFunc{ + fileOpenErrName: assert.Error, + uuids[0]: assert.NoError, + }, + expectedLoaded: []loadedData{files[0]}, + }, + { + name: "Empty Name Errors", + uuidsAndErrors: map[string]assert.ErrorAssertionFunc{ + "": assert.Error, + }, + expectedLoaded: []loadedData{}, }, } @@ -101,112 +218,101 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() { t := suite.T() c := kopiaDataCollection{ - streams: test.streams, - path: nil, + dir: getLayout(), + path: nil, + expectedVersion: serializationVersion, } - count := 0 - for returnedStream := range c.Items(ctx, fault.New(true)) { - require.Less(t, count, len(test.streams)) - assert.Equal(t, returnedStream.UUID(), uuids[count]) + for uuid, expectErr := range test.uuidsAndErrors { + err := c.addStream(ctx, uuid) + expectErr(t, err, "adding stream to collection", clues.ToCore(err)) + } + + var ( + found []loadedData + bus = fault.New(true) + ) + + for returnedStream := range c.Items(ctx, bus) { + require.Less(t, len(found), len(test.expectedLoaded), "items read safety") + + found = append(found, loadedData{}) + f := &found[len(found)-1] + f.uuid = returnedStream.UUID() buf, err := io.ReadAll(returnedStream.ToReader()) - require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, buf, testData[count]) - require.Implements(t, (*data.StreamSize)(nil), returnedStream) + if !assert.NoError(t, err, clues.ToCore(err)) { + continue + } + + f.data = buf + + if !assert.Implements(t, (*data.StreamSize)(nil), returnedStream) { + continue + } ss := returnedStream.(data.StreamSize) - assert.Equal(t, len(buf), int(ss.Size())) - count++ + f.size = ss.Size() } - assert.Equal(t, len(test.streams), count) + assert.Empty(t, bus.Recovered(), "expected no recoverable errors") + assert.NoError(t, bus.Failure(), "expected no hard failures") + + assert.ElementsMatch(t, test.expectedLoaded, found, "loaded items") }) } } -// These types are needed because we check that a fs.File was returned. -// Unfortunately fs.StreamingFile and fs.File have different interfaces so we -// have to fake things. 
-type mockSeeker struct{} - -func (s mockSeeker) Seek(offset int64, whence int) (int64, error) { - return 0, clues.New("not implemented") -} - -type mockReader struct { - io.ReadCloser - mockSeeker -} - -func (r mockReader) Entry() (fs.Entry, error) { - return nil, clues.New("not implemented") -} - -type mockFile struct { - // Use for Entry interface. - fs.StreamingFile - r io.ReadCloser -} - -func (f *mockFile) Open(ctx context.Context) (fs.Reader, error) { - return mockReader{ReadCloser: f.r}, nil -} - func (suite *KopiaDataCollectionUnitSuite) TestFetch() { var ( tenant = "a-tenant" user = "a-user" - service = path.ExchangeService.String() category = path.EmailCategory folder1 = "folder1" folder2 = "folder2" noErrFileName = "noError" errFileName = "error" + errFileName2 = "error2" noErrFileData = "foo bar baz" - - errReader = &exchMock.Data{ + errReader = &exchMock.Data{ ReadErr: assert.AnError, } ) // Needs to be a function so we can switch the serialization version as // needed. - getLayout := func(serVersion uint32) fs.Entry { - return virtualfs.NewStaticDirectory(encodeAsPath(tenant), []fs.Entry{ - virtualfs.NewStaticDirectory(encodeAsPath(service), []fs.Entry{ - virtualfs.NewStaticDirectory(encodeAsPath(user), []fs.Entry{ - virtualfs.NewStaticDirectory(encodeAsPath(category.String()), []fs.Entry{ - virtualfs.NewStaticDirectory(encodeAsPath(folder1), []fs.Entry{ - virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{ - &mockFile{ - StreamingFile: virtualfs.StreamingFileFromReader( - encodeAsPath(noErrFileName), - nil, - ), - r: newBackupStreamReader( - serVersion, - io.NopCloser(bytes.NewReader([]byte(noErrFileData))), - ), - }, - &mockFile{ - StreamingFile: virtualfs.StreamingFileFromReader( - encodeAsPath(errFileName), - nil, - ), - r: newBackupStreamReader( - serVersion, - errReader.ToReader(), - ), - }, - }), - }), - }), - }), - }), + getLayout := func(serVersion uint32) fs.Directory { + return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{ + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(noErrFileName), + nil, + ), + r: newBackupStreamReader( + serVersion, + io.NopCloser(bytes.NewReader([]byte(noErrFileData))), + ), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(errFileName), + nil, + ), + r: newBackupStreamReader( + serVersion, + errReader.ToReader(), + ), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(errFileName2), + nil, + ), + openErr: assert.AnError, + }, }) } @@ -268,7 +374,12 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetch() { root := getLayout(test.inputSerializationVersion) c := &i64counter{} - col := &kopiaDataCollection{path: pth, snapshotRoot: root, counter: c} + col := &kopiaDataCollection{ + path: pth, + dir: root, + counter: c, + expectedVersion: serializationVersion, + } s, err := col.Fetch(ctx, test.inputName) diff --git a/src/internal/kopia/merge_collection.go b/src/internal/kopia/merge_collection.go new file mode 100644 index 000000000..ab95dead8 --- /dev/null +++ b/src/internal/kopia/merge_collection.go @@ -0,0 +1,112 @@ +package kopia + +import ( + "context" + "errors" + + "github.com/alcionai/clues" + "golang.org/x/exp/slices" + + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" +) + +var _ data.RestoreCollection = &mergeCollection{} + +type col struct { + storagePath string + 
data.RestoreCollection +} + +type mergeCollection struct { + cols []col + // Technically don't need to track this but it can help detect errors. + fullPath path.Path +} + +func (mc *mergeCollection) addCollection( + storagePath string, + c data.RestoreCollection, +) error { + if c == nil { + return clues.New("adding nil collection"). + With("current_path", mc.FullPath()) + } else if mc.FullPath().String() != c.FullPath().String() { + return clues.New("attempting to merge collection with different path"). + With("current_path", mc.FullPath(), "new_path", c.FullPath()) + } + + mc.cols = append(mc.cols, col{storagePath: storagePath, RestoreCollection: c}) + + // Keep a stable sorting of this merged collection set so we can say there's + // some deterministic behavior when Fetch is called. We don't expect to have + // to merge many collections. + slices.SortStableFunc(mc.cols, func(a, b col) bool { + return a.storagePath < b.storagePath + }) + + return nil +} + +func (mc mergeCollection) FullPath() path.Path { + return mc.fullPath +} + +func (mc *mergeCollection) Items( + ctx context.Context, + errs *fault.Bus, +) <-chan data.Stream { + res := make(chan data.Stream) + + go func() { + defer close(res) + + logger.Ctx(ctx).Infow( + "getting items for merged collection", + "merged_collection_count", len(mc.cols)) + + for _, c := range mc.cols { + // Unfortunately doesn't seem to be a way right now to see if the + // iteration failed and we should be exiting early. + ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath) + logger.Ctx(ictx).Debug("sending items from merged collection") + + for item := range c.Items(ictx, errs) { + res <- item + } + } + }() + + return res +} + +// Fetch goes through all the collections in this one and returns the first +// match found or the first error that is not data.ErrNotFound. If multiple +// collections have the requested item, the instance in the collection with the +// lexicographically smallest storage path is returned. +func (mc *mergeCollection) Fetch( + ctx context.Context, + name string, +) (data.Stream, error) { + logger.Ctx(ctx).Infow( + "fetching item in merged collection", + "merged_collection_count", len(mc.cols)) + + for _, c := range mc.cols { + ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath) + + logger.Ctx(ictx).Debug("looking for item in merged collection") + + s, err := c.Fetch(ictx, name) + if err == nil { + return s, nil + } else if err != nil && !errors.Is(err, data.ErrNotFound) { + return nil, clues.Wrap(err, "fetching from merged collection"). 
+ WithClues(ictx) + } + } + + return nil, clues.Wrap(data.ErrNotFound, "merged collection fetch") +} diff --git a/src/internal/kopia/merge_collection_test.go b/src/internal/kopia/merge_collection_test.go new file mode 100644 index 000000000..e287452dc --- /dev/null +++ b/src/internal/kopia/merge_collection_test.go @@ -0,0 +1,297 @@ +package kopia + +import ( + "bytes" + "io" + "testing" + + "github.com/alcionai/clues" + "github.com/kopia/kopia/fs" + "github.com/kopia/kopia/fs/virtualfs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/connector/exchange/mock" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" +) + +type MergeCollectionUnitSuite struct { + tester.Suite +} + +func TestMergeCollectionUnitSuite(t *testing.T) { + suite.Run(t, &MergeCollectionUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *MergeCollectionUnitSuite) TestReturnsPath() { + t := suite.T() + + pth, err := path.Build( + "a-tenant", + "a-user", + path.ExchangeService, + path.EmailCategory, + false, + "some", "path", "for", "data") + require.NoError(t, err, clues.ToCore(err)) + + c := mergeCollection{ + fullPath: pth, + } + + assert.Equal(t, pth, c.FullPath()) +} + +func (suite *MergeCollectionUnitSuite) TestItems() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + storagePaths := []string{ + "tenant-id/exchange/user-id/mail/some/folder/path1", + "tenant-id/exchange/user-id/mail/some/folder/path2", + } + + expectedItemNames := []string{"1", "2"} + + pth, err := path.Build( + "a-tenant", + "a-user", + path.ExchangeService, + path.EmailCategory, + false, + "some", "path", "for", "data") + require.NoError(t, err, clues.ToCore(err)) + + c1 := mock.NewCollection(pth, nil, 1) + c1.Names[0] = expectedItemNames[0] + + c2 := mock.NewCollection(pth, nil, 1) + c2.Names[0] = expectedItemNames[1] + + // Not testing fetch here so safe to use this wrapper. 
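To recap the merge semantics these tests cover: Items streams every wrapped collection in storage-path order, and Fetch returns the first hit, so an item present in several collections resolves to the one with the lexicographically smallest storage path. A small sketch of the ordering invariant, reusing the unexported col type from merge_collection.go and the golang.org/x/exp/slices import (illustrative values taken from the test above):

cols := []col{
	{storagePath: "tenant-id/exchange/user-id/mail/some/folder/path2"},
	{storagePath: "tenant-id/exchange/user-id/mail/some/folder/path1"},
}

// addCollection re-establishes this invariant after every append; it is what
// makes Fetch deterministic when two collections hold the same item name:
// the collection with the smaller storage path is consulted first.
slices.SortStableFunc(cols, func(a, b col) bool {
	return a.storagePath < b.storagePath
})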
+ cols := []data.RestoreCollection{ + data.NotFoundRestoreCollection{Collection: c1}, + data.NotFoundRestoreCollection{Collection: c2}, + } + + dc := &mergeCollection{fullPath: pth} + + for i, c := range cols { + err := dc.addCollection(storagePaths[i], c) + require.NoError(t, err, "adding collection", clues.ToCore(err)) + } + + gotItemNames := []string{} + + for item := range dc.Items(ctx, fault.New(true)) { + gotItemNames = append(gotItemNames, item.UUID()) + } + + assert.ElementsMatch(t, expectedItemNames, gotItemNames) +} + +func (suite *MergeCollectionUnitSuite) TestAddCollection_DifferentPathFails() { + t := suite.T() + + pth1, err := path.Build( + "a-tenant", + "a-user", + path.ExchangeService, + path.EmailCategory, + false, + "some", "path", "for", "data") + require.NoError(t, err, clues.ToCore(err)) + + pth2, err := path.Build( + "a-tenant", + "a-user", + path.ExchangeService, + path.EmailCategory, + false, + "some", "path", "for", "data2") + require.NoError(t, err, clues.ToCore(err)) + + dc := mergeCollection{fullPath: pth1} + + err = dc.addCollection("some/path", &kopiaDataCollection{path: pth2}) + assert.Error(t, err, clues.ToCore(err)) +} + +func (suite *MergeCollectionUnitSuite) TestFetch() { + var ( + fileData1 = []byte("abcdefghijklmnopqrstuvwxyz") + fileData2 = []byte("zyxwvutsrqponmlkjihgfedcba") + fileData3 = []byte("foo bar baz") + + fileName1 = "file1" + fileName2 = "file2" + fileLookupErrName = "errLookup" + fileOpenErrName = "errOpen" + + colPaths = []string{ + "tenant-id/exchange/user-id/mail/some/data/directory1", + "tenant-id/exchange/user-id/mail/some/data/directory2", + } + ) + + pth, err := path.Build( + "a-tenant", + "a-user", + path.ExchangeService, + path.EmailCategory, + false, + "some", "path", "for", "data") + require.NoError(suite.T(), err, clues.ToCore(err)) + + // Needs to be a function so the readers get refreshed each time. 
+ layouts := []func() fs.Directory{ + // Has the following; + // - file1: data[0] + // - errOpen: (error opening file) + func() fs.Directory { + return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{ + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(fileName1), + nil, + ), + r: newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(fileData1)), + ), + size: int64(len(fileData1) + versionSize), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(fileOpenErrName), + nil, + ), + openErr: assert.AnError, + }, + }) + }, + + // Has the following; + // - file1: data[1] + // - file2: data[0] + // - errOpen: data[2] + func() fs.Directory { + return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{ + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(fileName1), + nil, + ), + r: newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(fileData2)), + ), + size: int64(len(fileData2) + versionSize), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(fileName2), + nil, + ), + r: newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(fileData1)), + ), + size: int64(len(fileData1) + versionSize), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(fileOpenErrName), + nil, + ), + r: newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(fileData3)), + ), + size: int64(len(fileData3) + versionSize), + }, + }) + }, + } + + table := []struct { + name string + fileName string + expectError assert.ErrorAssertionFunc + expectData []byte + notFoundErr bool + }{ + { + name: "Duplicate File, first collection", + fileName: fileName1, + expectError: assert.NoError, + expectData: fileData1, + }, + { + name: "Distinct File, second collection", + fileName: fileName2, + expectError: assert.NoError, + expectData: fileData1, + }, + { + name: "Error opening file", + fileName: fileOpenErrName, + expectError: assert.Error, + }, + { + name: "File not found", + fileName: fileLookupErrName, + expectError: assert.Error, + notFoundErr: true, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + c := &i64counter{} + + dc := mergeCollection{fullPath: pth} + + for i, layout := range layouts { + col := &kopiaDataCollection{ + path: pth, + dir: layout(), + counter: c, + expectedVersion: serializationVersion, + } + + err := dc.addCollection(colPaths[i], col) + require.NoError(t, err, "adding collection", clues.ToCore(err)) + } + + s, err := dc.Fetch(ctx, test.fileName) + test.expectError(t, err, clues.ToCore(err)) + + if err != nil { + if test.notFoundErr { + assert.ErrorIs(t, err, data.ErrNotFound, clues.ToCore(err)) + } + + return + } + + fileData, err := io.ReadAll(s.ToReader()) + require.NoError(t, err, "reading file data", clues.ToCore(err)) + + if err != nil { + return + } + + assert.Equal(t, test.expectData, fileData) + }) + } +} diff --git a/src/internal/kopia/merge_details.go b/src/internal/kopia/merge_details.go index 5917892a7..2ec6cc4bb 100644 --- a/src/internal/kopia/merge_details.go +++ b/src/internal/kopia/merge_details.go @@ -114,7 +114,7 @@ type locRefs struct { } type locationPrefixMatcher struct { - m prefixmatcher.Matcher[locRefs] + m prefixmatcher.Builder[locRefs] } func (m *locationPrefixMatcher) add( diff --git 
a/src/internal/kopia/model_store.go b/src/internal/kopia/model_store.go index e0d4d3968..54e7b67b5 100644 --- a/src/internal/kopia/model_store.go +++ b/src/internal/kopia/model_store.go @@ -130,7 +130,7 @@ func putInner( base.ID = model.StableID(uuid.NewString()) } - tmpTags, err := tagsForModelWithID(s, base.ID, base.Version, base.Tags) + tmpTags, err := tagsForModelWithID(s, base.ID, base.ModelVersion, base.Tags) if err != nil { // Will be wrapped at a higher layer. return clues.Stack(err).WithClues(ctx) @@ -158,7 +158,7 @@ func (ms *ModelStore) Put( return clues.Stack(errUnrecognizedSchema) } - m.Base().Version = ms.modelVersion + m.Base().ModelVersion = ms.modelVersion err := repo.WriteSession( ctx, @@ -205,7 +205,7 @@ func (ms ModelStore) populateBaseModelFromMetadata( base.ModelStoreID = m.ID base.ID = model.StableID(id) - base.Version = v + base.ModelVersion = v base.Tags = m.Labels stripHiddenTags(base.Tags) @@ -424,7 +424,7 @@ func (ms *ModelStore) Update( return clues.Stack(errNoModelStoreID).WithClues(ctx) } - base.Version = ms.modelVersion + base.ModelVersion = ms.modelVersion // TODO(ashmrtnz): Can remove if bottleneck. if err := ms.checkPrevModelVersion(ctx, s, base); err != nil { diff --git a/src/internal/kopia/model_store_test.go b/src/internal/kopia/model_store_test.go index 9b9daf9f7..b5bf76bcd 100644 --- a/src/internal/kopia/model_store_test.go +++ b/src/internal/kopia/model_store_test.go @@ -17,6 +17,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/control/repository" ) type fooModel struct { @@ -264,7 +265,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet() { require.NotEmpty(t, foo.ModelStoreID) require.NotEmpty(t, foo.ID) - require.Equal(t, globalModelVersion, foo.Version) + require.Equal(t, globalModelVersion, foo.ModelVersion) returned := &fooModel{} err = suite.m.Get(suite.ctx, test.s, foo.ID, returned) @@ -569,14 +570,14 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() { name: "NoTags", mutator: func(m *fooModel) { m.Bar = "baz" - m.Version = 42 + m.ModelVersion = 42 }, }, { name: "WithTags", mutator: func(m *fooModel) { m.Bar = "baz" - m.Version = 42 + m.ModelVersion = 42 m.Tags = map[string]string{ "a": "42", } @@ -607,7 +608,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() { oldModelID := foo.ModelStoreID oldStableID := foo.ID - oldVersion := foo.Version + oldVersion := foo.ModelVersion test.mutator(foo) @@ -616,7 +617,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() { assert.Equal(t, oldStableID, foo.ID) // The version in the model store has not changed so we get the old // version back. 
- assert.Equal(t, oldVersion, foo.Version) + assert.Equal(t, oldVersion, foo.ModelVersion) returned := &fooModel{} @@ -627,7 +628,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() { ids, err := m.GetIDsForType(ctx, theModelType, nil) require.NoError(t, err, clues.ToCore(err)) require.Len(t, ids, 1) - assert.Equal(t, globalModelVersion, ids[0].Version) + assert.Equal(t, globalModelVersion, ids[0].ModelVersion) if oldModelID == foo.ModelStoreID { // Unlikely, but we don't control ModelStoreID generation and can't @@ -803,7 +804,7 @@ func openConnAndModelStore( st := tester.NewPrefixedS3Storage(t) c := NewConn(st) - err := c.Initialize(ctx) + err := c.Initialize(ctx, repository.Options{}) require.NoError(t, err, clues.ToCore(err)) defer func() { @@ -822,7 +823,7 @@ func reconnectToModelStore( ctx context.Context, //revive:disable-line:context-as-argument c *conn, ) *ModelStore { - err := c.Connect(ctx) + err := c.Connect(ctx, repository.Options{}) require.NoError(t, err, clues.ToCore(err)) defer func() { diff --git a/src/internal/kopia/path_encoder.go b/src/internal/kopia/path_encoder.go index f30cfaf08..2f529e964 100644 --- a/src/internal/kopia/path_encoder.go +++ b/src/internal/kopia/path_encoder.go @@ -3,6 +3,8 @@ package kopia import ( "encoding/base64" "path" + + "github.com/alcionai/clues" ) var encoder = base64.URLEncoding @@ -20,6 +22,21 @@ func encodeElements(elements ...string) []string { return encoded } +func decodeElements(elements ...string) ([]string, error) { + decoded := make([]string, 0, len(elements)) + + for _, e := range elements { + bs, err := encoder.DecodeString(e) + if err != nil { + return nil, clues.Wrap(err, "decoding element").With("element", e) + } + + decoded = append(decoded, string(bs)) + } + + return decoded, nil +} + // encodeAsPath takes a set of elements and returns the concatenated elements as // if they were a path. The elements are joined with the separator in the golang // path package. diff --git a/src/internal/kopia/s3.go b/src/internal/kopia/s3.go index 5810487dc..6b5c081d7 100644 --- a/src/internal/kopia/s3.go +++ b/src/internal/kopia/s3.go @@ -31,6 +31,10 @@ func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error) Prefix: cfg.Prefix, DoNotUseTLS: cfg.DoNotUseTLS, DoNotVerifyTLS: cfg.DoNotVerifyTLS, + Tags: s.SessionTags, + SessionName: s.SessionName, + RoleARN: s.Role, + RoleDuration: s.SessionDuration, } store, err := s3.New(ctx, &opts, false) diff --git a/src/internal/kopia/snapshot_manager.go b/src/internal/kopia/snapshot_manager.go index a25d52b92..a89eccbd5 100644 --- a/src/internal/kopia/snapshot_manager.go +++ b/src/internal/kopia/snapshot_manager.go @@ -39,6 +39,11 @@ func (r Reason) TagKeys() []string { } } +// Key is the concatenation of the ResourceOwner, Service, and Category. 
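A brief illustration of the new Key helper; the field values are hypothetical and only show the shape of the result:

r := Reason{
	ResourceOwner: "user-id",
	Service:       path.ExchangeService,
	Category:      path.EmailCategory,
}

// Key concatenates the three parts, i.e.
// "user-id" + path.ExchangeService.String() + path.EmailCategory.String(),
// giving a stable per-owner, per-service, per-category lookup key.
key := r.Key()
_ = key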
+func (r Reason) Key() string { + return r.ResourceOwner + r.Service.String() + r.Category.String() +} + type ManifestEntry struct { *snapshot.Manifest // Reason contains the ResourceOwners and Service/Categories that caused this diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index 1e1f85e96..a1cc0bed2 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -21,6 +21,7 @@ import ( "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot/snapshotfs" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph/metadata" "github.com/alcionai/corso/src/internal/data" @@ -346,7 +347,7 @@ func collectionEntries( seen[encodedName] = struct{}{} // For now assuming that item IDs don't need escaping. - itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true) + itemPath, err := streamedEnts.FullPath().AppendItem(e.UUID()) if err != nil { err = clues.Wrap(err, "getting full item path") progress.errs.AddRecoverable(err) @@ -413,7 +414,7 @@ func streamBaseEntries( locationPath *path.Builder, dir fs.Directory, encodedSeen map[string]struct{}, - globalExcludeSet map[string]map[string]struct{}, + globalExcludeSet prefixmatcher.StringSetReader, progress *corsoProgress, ) error { if dir == nil { @@ -421,20 +422,19 @@ func streamBaseEntries( } var ( + longest string excludeSet map[string]struct{} - curPrefix string ) - ctx = clues.Add(ctx, "current_item_path", curPath) - - for prefix, excludes := range globalExcludeSet { - // Select the set with the longest prefix to be most precise. - if strings.HasPrefix(curPath.String(), prefix) && len(prefix) >= len(curPrefix) { - excludeSet = excludes - curPrefix = prefix - } + if globalExcludeSet != nil { + longest, excludeSet, _ = globalExcludeSet.LongestPrefix(curPath.String()) } + ctx = clues.Add( + ctx, + "current_item_path", curPath, + "longest_prefix", longest) + err := dir.IterateEntries(ctx, func(innerCtx context.Context, entry fs.Entry) error { if err := innerCtx.Err(); err != nil { return err @@ -464,7 +464,7 @@ func streamBaseEntries( } // For now assuming that item IDs don't need escaping. - itemPath, err := curPath.Append(entName, true) + itemPath, err := curPath.AppendItem(entName) if err != nil { return clues.Wrap(err, "getting full item path for base entry") } @@ -473,7 +473,7 @@ func streamBaseEntries( // backup details. If the item moved and we had only the new path, we'd be // unable to find it in the old backup details because we wouldn't know what // to look for. 
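For context on the LongestPrefix change earlier in this hunk: the hand-rolled prefix scan is replaced with a single lookup that selects the most specific registered exclude set. A hedged sketch, with hypothetical prefixes, assuming the mock prefix map used by the tests behaves like the production matcher (the three return values match the call in streamBaseEntries):

ex := pmMock.NewPrefixMap(map[string]map[string]struct{}{
	"tenant/exchange/user/email":       {"itemA": {}},
	"tenant/exchange/user/email/inbox": {"itemB": {}},
})

// The longest matching prefix wins, so a directory under .../inbox consults
// the more specific set ({"itemB"}) rather than the category-wide one.
longest, excludeSet, found := ex.LongestPrefix("tenant/exchange/user/email/inbox/folder")
_, _, _ = longest, excludeSet, found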
- prevItemPath, err := prevPath.Append(entName, true) + prevItemPath, err := prevPath.AppendItem(entName) if err != nil { return clues.Wrap(err, "getting previous full item path for base entry") } @@ -521,7 +521,7 @@ func getStreamItemFunc( staticEnts []fs.Entry, streamedEnts data.BackupCollection, baseDir fs.Directory, - globalExcludeSet map[string]map[string]struct{}, + globalExcludeSet prefixmatcher.StringSetReader, progress *corsoProgress, ) func(context.Context, func(context.Context, fs.Entry) error) error { return func(ctx context.Context, cb func(context.Context, fs.Entry) error) error { @@ -569,7 +569,7 @@ func getStreamItemFunc( func buildKopiaDirs( dirName string, dir *treeMap, - globalExcludeSet map[string]map[string]struct{}, + globalExcludeSet prefixmatcher.StringSetReader, progress *corsoProgress, ) (fs.Directory, error) { // Need to build the directory tree from the leaves up because intermediate @@ -727,7 +727,7 @@ func inflateCollectionTree( toMerge *mergeDetails, ) (map[string]*treeMap, map[string]path.Path, error) { roots := make(map[string]*treeMap) - // Contains the old path for collections that have been moved or renamed. + // Contains the old path for collections that are not new. // Allows resolving what the new path should be when walking the base // snapshot(s)'s hierarchy. Nil represents a collection that was deleted. updatedPaths := make(map[string]path.Path) @@ -776,6 +776,14 @@ func inflateCollectionTree( if err := addMergeLocation(s, toMerge); err != nil { return nil, nil, clues.Wrap(err, "adding merge location").WithClues(ictx) } + case data.NotMovedState: + p := s.PreviousPath().String() + if _, ok := updatedPaths[p]; ok { + return nil, nil, clues.New("multiple previous state changes to collection"). + WithClues(ictx) + } + + updatedPaths[p] = s.FullPath() } if s.FullPath() == nil || len(s.FullPath().Elements()) == 0 { @@ -1003,15 +1011,20 @@ func inflateBaseTree( return clues.Wrap(err, "subtree root is not directory").WithClues(ictx) } - // We're assuming here that the prefix for the path has not changed (i.e. - // all of tenant, service, resource owner, and category are the same in the - // old snapshot (snap) and the snapshot we're currently trying to make. + // This ensures that a migration on the directory prefix can complete. + // The prefix is the tenant/service/owner/category set, which remains + // otherwise unchecked in tree inflation below this point. 
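// For example (hypothetical values, mirroring the migration test added later
// in this patch):
//
//   base snapshot subtree prefix: a-tenant/exchange/user1/email
//   updatedPaths entry:           a-tenant/exchange/user1/email -> a-tenant/exchange/user1new/email
//
// The lookup that follows swaps the old prefix for the new one, so the whole
// base subtree is grafted under the migrated resource owner.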
+ newSubtreePath := subtreePath + if p, ok := updatedPaths[subtreePath.String()]; ok { + newSubtreePath = p.ToBuilder() + } + if err = traverseBaseDir( ictx, 0, updatedPaths, subtreePath.Dir(), - subtreePath.Dir(), + newSubtreePath.Dir(), subtreeDir, roots, ); err != nil { @@ -1040,7 +1053,7 @@ func inflateDirTree( loader snapshotLoader, baseSnaps []IncrementalBase, collections []data.BackupCollection, - globalExcludeSet map[string]map[string]struct{}, + globalExcludeSet prefixmatcher.StringSetReader, progress *corsoProgress, ) (fs.Directory, error) { roots, updatedPaths, err := inflateCollectionTree(ctx, collections, progress.toMerge) diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index 157e8b80c..e86826f27 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" @@ -183,16 +184,22 @@ func expectDirs( ) { t.Helper() - if exactly { - require.Len(t, entries, len(dirs)) - } - - names := make([]string, 0, len(entries)) + ents := make([]string, 0, len(entries)) for _, e := range entries { - names = append(names, e.Name()) + ents = append(ents, e.Name()) } - assert.Subset(t, names, dirs) + dd, err := decodeElements(dirs...) + require.NoError(t, err, clues.ToCore(err)) + + de, err := decodeElements(ents...) + require.NoError(t, err, clues.ToCore(err)) + + if exactly { + require.Lenf(t, entries, len(dirs), "expected exactly %+v\ngot %+v", dd, de) + } + + assert.Subsetf(t, dirs, ents, "expected at least %+v\ngot %+v", dd, de) } func getDirEntriesForEntry( @@ -702,7 +709,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() { // - emails // - Inbox // - 42 separate files - dirTree, err := inflateDirTree(ctx, nil, nil, collections, nil, progress) + dirTree, err := inflateDirTree(ctx, nil, nil, collections, pmMock.NewPrefixMap(nil), progress) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, encodeAsPath(testTenant), dirTree.Name()) @@ -799,7 +806,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory() errs: fault.New(true), } - dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress) + dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, pmMock.NewPrefixMap(nil), progress) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, encodeAsPath(testTenant), dirTree.Name()) @@ -905,7 +912,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() { errs: fault.New(true), } - _, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress) + _, err := inflateDirTree(ctx, nil, nil, test.layout, pmMock.NewPrefixMap(nil), progress) assert.Error(t, err, clues.ToCore(err)) }) } @@ -922,15 +929,18 @@ func (msw *mockSnapshotWalker) SnapshotRoot(*snapshot.Manifest) (fs.Entry, error func mockIncrementalBase( id, tenant, resourceOwner string, service path.ServiceType, - category path.CategoryType, + categories ...path.CategoryType, ) IncrementalBase { + stps := []*path.Builder{} + for _, c := range categories { + stps = append(stps, path.Builder{}.Append(tenant, service.String(), resourceOwner, c.String())) + } + return IncrementalBase{ Manifest: &snapshot.Manifest{ ID: manifest.ID(id), }, - SubtreePaths: 
[]*path.Builder{ - path.Builder{}.Append(tenant, service.String(), resourceOwner, category.String()), - }, + SubtreePaths: stps, } } @@ -1018,7 +1028,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() { cols = append(cols, mc) } - _, err := inflateDirTree(ctx, nil, nil, cols, nil, progress) + _, err := inflateDirTree(ctx, nil, nil, cols, pmMock.NewPrefixMap(nil), progress) require.Error(t, err, clues.ToCore(err)) }) } @@ -1102,6 +1112,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { name: "AddsNewItems", inputCollections: func() []data.BackupCollection { mc := exchMock.NewCollection(storePath, locPath, 1) + mc.PrevPath = storePath mc.Names[0] = testFileName2 mc.Data[0] = testFileData2 mc.ColState = data.NotMovedState @@ -1137,6 +1148,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { name: "SkipsUpdatedItems", inputCollections: func() []data.BackupCollection { mc := exchMock.NewCollection(storePath, locPath, 1) + mc.PrevPath = storePath mc.Names[0] = testFileName mc.Data[0] = testFileData2 mc.ColState = data.NotMovedState @@ -1301,9 +1313,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, test.inputCollections(), - nil, - progress, - ) + pmMock.NewPrefixMap(nil), + progress) require.NoError(t, err, clues.ToCore(err)) expectTree(t, ctx, test.expected, dirTree) @@ -1422,7 +1433,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto table := []struct { name string inputCollections func(t *testing.T) []data.BackupCollection - inputExcludes map[string]map[string]struct{} + inputExcludes *pmMock.PrefixMap expected *expectedNode }{ { @@ -1430,11 +1441,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto inputCollections: func(t *testing.T) []data.BackupCollection { return nil }, - inputExcludes: map[string]map[string]struct{}{ + inputExcludes: pmMock.NewPrefixMap(map[string]map[string]struct{}{ "": { inboxFileName1: {}, }, - }, + }), expected: expectedTreeWithChildren( []string{ testTenant, @@ -2054,6 +2065,150 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, ), }, + { + // This could happen if a subfolder is moved out of the parent, the parent + // is deleted, a new folder at the same location as the parent is created, + // and then the subfolder is moved back to the same location. 
+ name: "Delete Parent But Child Marked Not Moved Explicit New Parent", + inputCollections: func(t *testing.T) []data.BackupCollection { + inbox := exchMock.NewCollection(nil, inboxLocPath, 0) + inbox.PrevPath = inboxStorePath + inbox.ColState = data.DeletedState + + inbox2 := exchMock.NewCollection(inboxStorePath, inboxLocPath, 1) + inbox2.PrevPath = nil + inbox2.ColState = data.NewState + inbox2.Names[0] = workFileName1 + + personal := exchMock.NewCollection(personalStorePath, personalLocPath, 0) + personal.PrevPath = personalStorePath + personal.ColState = data.NotMovedState + + return []data.BackupCollection{inbox, inbox2, personal} + }, + expected: expectedTreeWithChildren( + []string{ + testTenant, + service, + testUser, + category, + }, + []*expectedNode{ + { + name: testInboxID, + children: []*expectedNode{ + { + name: workFileName1, + children: []*expectedNode{}, + }, + { + name: personalID, + children: []*expectedNode{ + { + name: personalFileName1, + children: []*expectedNode{}, + }, + { + name: personalFileName2, + children: []*expectedNode{}, + }, + }, + }, + }, + }, + }, + ), + }, + { + // This could happen if a subfolder is moved out of the parent, the parent + // is deleted, a new folder at the same location as the parent is created, + // and then the subfolder is moved back to the same location. + name: "Delete Parent But Child Marked Not Moved Implicit New Parent", + inputCollections: func(t *testing.T) []data.BackupCollection { + inbox := exchMock.NewCollection(nil, inboxLocPath, 0) + inbox.PrevPath = inboxStorePath + inbox.ColState = data.DeletedState + + // New folder not explicitly listed as it may not have had new items. + personal := exchMock.NewCollection(personalStorePath, personalLocPath, 0) + personal.PrevPath = personalStorePath + personal.ColState = data.NotMovedState + + return []data.BackupCollection{inbox, personal} + }, + expected: expectedTreeWithChildren( + []string{ + testTenant, + service, + testUser, + category, + }, + []*expectedNode{ + { + name: testInboxID, + children: []*expectedNode{ + { + name: personalID, + children: []*expectedNode{ + { + name: personalFileName1, + children: []*expectedNode{}, + }, + { + name: personalFileName2, + children: []*expectedNode{}, + }, + }, + }, + }, + }, + }, + ), + }, + { + // This could happen if a subfolder is moved out of the parent, the parent + // is deleted, a new folder at the same location as the parent is created, + // and then the subfolder is moved back to the same location. + name: "Delete Parent But Child Marked Not Moved Implicit New Parent Child Do Not Merge", + inputCollections: func(t *testing.T) []data.BackupCollection { + inbox := exchMock.NewCollection(nil, inboxLocPath, 0) + inbox.PrevPath = inboxStorePath + inbox.ColState = data.DeletedState + + // New folder not explicitly listed as it may not have had new items. 
+ personal := exchMock.NewCollection(personalStorePath, personalLocPath, 1) + personal.PrevPath = personalStorePath + personal.ColState = data.NotMovedState + personal.DoNotMerge = true + personal.Names[0] = workFileName1 + + return []data.BackupCollection{inbox, personal} + }, + expected: expectedTreeWithChildren( + []string{ + testTenant, + service, + testUser, + category, + }, + []*expectedNode{ + { + name: testInboxID, + children: []*expectedNode{ + { + name: personalID, + children: []*expectedNode{ + { + name: workFileName1, + children: []*expectedNode{}, + }, + }, + }, + }, + }, + }, + ), + }, } for _, test := range table { @@ -2074,6 +2229,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto snapshotRoot: getBaseSnapshot(), } + ie := pmMock.NewPrefixMap(nil) + if test.inputExcludes != nil { + ie = test.inputExcludes + } + dirTree, err := inflateDirTree( ctx, msw, @@ -2081,7 +2241,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, test.inputCollections(t), - test.inputExcludes, + ie, progress) require.NoError(t, err, clues.ToCore(err)) @@ -2245,7 +2405,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, collections, - nil, + pmMock.NewPrefixMap(nil), progress) require.NoError(t, err, clues.ToCore(err)) @@ -2350,7 +2510,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase() mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, collections, - nil, + pmMock.NewPrefixMap(nil), progress) require.NoError(t, err, clues.ToCore(err)) @@ -2601,9 +2761,172 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt mockIncrementalBase("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, collections, - nil, - progress, - ) + pmMock.NewPrefixMap(nil), + progress) + require.NoError(t, err, clues.ToCore(err)) + + expectTree(t, ctx, expected, dirTree) +} + +func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubtrees() { + tester.LogTimeOfTest(suite.T()) + t := suite.T() + + ctx, flush := tester.NewContext() + defer flush() + + const ( + contactsDir = "contacts" + migratedUser = "user_migrate" + ) + + oldPrefixPathEmail, err := path.ServicePrefix(testTenant, testUser, path.ExchangeService, path.EmailCategory) + require.NoError(t, err, clues.ToCore(err)) + + newPrefixPathEmail, err := path.ServicePrefix(testTenant, migratedUser, path.ExchangeService, path.EmailCategory) + require.NoError(t, err, clues.ToCore(err)) + + oldPrefixPathCont, err := path.ServicePrefix(testTenant, testUser, path.ExchangeService, path.ContactsCategory) + require.NoError(t, err, clues.ToCore(err)) + + newPrefixPathCont, err := path.ServicePrefix(testTenant, migratedUser, path.ExchangeService, path.ContactsCategory) + require.NoError(t, err, clues.ToCore(err)) + + var ( + inboxFileName1 = testFileName + + inboxFileData1 = testFileData + // inboxFileData1v2 = testFileData5 + + contactsFileName1 = testFileName3 + contactsFileData1 = testFileData3 + ) + + // Must be a function that returns a new instance each time as StreamingFile + // can only return its Reader once. 
+ // baseSnapshot with the following layout: + // - a-tenant + // - exchange + // - user1 + // - email + // - Inbox + // - file1 + // - contacts + // - contacts + // - file2 + getBaseSnapshot1 := func() fs.Entry { + return baseWithChildren( + []string{testTenant, service, testUser}, + []fs.Entry{ + virtualfs.NewStaticDirectory( + encodeElements(category)[0], + []fs.Entry{ + virtualfs.NewStaticDirectory( + encodeElements(testInboxID)[0], + []fs.Entry{ + virtualfs.StreamingFileWithModTimeFromReader( + encodeElements(inboxFileName1)[0], + time.Time{}, + newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(inboxFileData1)))), + }), + }), + virtualfs.NewStaticDirectory( + encodeElements(path.ContactsCategory.String())[0], + []fs.Entry{ + virtualfs.NewStaticDirectory( + encodeElements(contactsDir)[0], + []fs.Entry{ + virtualfs.StreamingFileWithModTimeFromReader( + encodeElements(contactsFileName1)[0], + time.Time{}, + newBackupStreamReader( + serializationVersion, + io.NopCloser(bytes.NewReader(contactsFileData1)))), + }), + }), + }, + ) + } + + // Check the following: + // * contacts pulled from base1 unchanged even if no collections reference + // it + // * email pulled from base2 + // + // Expected output: + // - a-tenant + // - exchange + // - user1new + // - email + // - Inbox + // - file1 + // - contacts + // - contacts + // - file1 + expected := expectedTreeWithChildren( + []string{testTenant, service, migratedUser}, + []*expectedNode{ + { + name: category, + children: []*expectedNode{ + { + name: testInboxID, + children: []*expectedNode{ + { + name: inboxFileName1, + children: []*expectedNode{}, + data: inboxFileData1, + }, + }, + }, + }, + }, + { + name: path.ContactsCategory.String(), + children: []*expectedNode{ + { + name: contactsDir, + children: []*expectedNode{ + { + name: contactsFileName1, + children: []*expectedNode{}, + }, + }, + }, + }, + }, + }, + ) + + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + toMerge: newMergeDetails(), + errs: fault.New(true), + } + + mce := exchMock.NewCollection(newPrefixPathEmail, nil, 0) + mce.PrevPath = oldPrefixPathEmail + mce.ColState = data.MovedState + + mcc := exchMock.NewCollection(newPrefixPathCont, nil, 0) + mcc.PrevPath = oldPrefixPathCont + mcc.ColState = data.MovedState + + msw := &mockMultiSnapshotWalker{ + snaps: map[string]fs.Entry{"id1": getBaseSnapshot1()}, + } + + dirTree, err := inflateDirTree( + ctx, + msw, + []IncrementalBase{ + mockIncrementalBase("id1", testTenant, testUser, path.ExchangeService, path.EmailCategory, path.ContactsCategory), + }, + []data.BackupCollection{mce, mcc}, + pmMock.NewPrefixMap(nil), + progress) require.NoError(t, err, clues.ToCore(err)) expectTree(t, ctx, expected, dirTree) diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index 293b8df02..e4d73bb4c 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -2,20 +2,26 @@ package kopia import ( "context" + "errors" "strings" "github.com/alcionai/clues" "github.com/kopia/kopia/fs" "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/maintenance" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot/policy" "github.com/kopia/kopia/snapshot/snapshotfs" + "github.com/kopia/kopia/snapshot/snapshotmaintenance" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" + 
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/stats" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" @@ -134,7 +140,7 @@ func (w Wrapper) ConsumeBackupCollections( ctx context.Context, previousSnapshots []IncrementalBase, collections []data.BackupCollection, - globalExcludeSet map[string]map[string]struct{}, + globalExcludeSet prefixmatcher.StringSetReader, tags map[string]string, buildTreeWithBase bool, errs *fault.Bus, @@ -146,7 +152,7 @@ func (w Wrapper) ConsumeBackupCollections( ctx, end := diagnostics.Span(ctx, "kopia:consumeBackupCollections") defer end() - if len(collections) == 0 && len(globalExcludeSet) == 0 { + if len(collections) == 0 && (globalExcludeSet == nil || globalExcludeSet.Empty()) { return &BackupStats{}, &details.Builder{}, nil, nil } @@ -321,26 +327,24 @@ func (w Wrapper) getSnapshotRoot( return rootDirEntry, nil } -// getItemStream looks up the item at the given path starting from snapshotRoot. -// If the item is a file in kopia then it returns a data.Stream of the item. If -// the item does not exist in kopia or is not a file an error is returned. The -// UUID of the returned data.Stream will be the name of the kopia file the data -// is sourced from. -func getItemStream( +// getDir looks up the directory at the given path starting from snapshotRoot. +// If the item is a directory in kopia then it returns the kopia fs.Directory +// handle. If the item does not exist in kopia or is not a directory an error is +// returned. +func getDir( ctx context.Context, - itemPath path.Path, + dirPath path.Path, snapshotRoot fs.Entry, - bcounter ByteCounter, -) (data.Stream, error) { - if itemPath == nil { - return nil, clues.Wrap(errNoRestorePath, "getting item stream").WithClues(ctx) +) (fs.Directory, error) { + if dirPath == nil { + return nil, clues.Wrap(errNoRestorePath, "getting directory").WithClues(ctx) } // GetNestedEntry handles nil properly. 
e, err := snapshotfs.GetNestedEntry( ctx, snapshotRoot, - encodeElements(itemPath.PopFront().Elements()...)) + encodeElements(dirPath.PopFront().Elements()...)) if err != nil { if isErrEntryNotFound(err) { err = clues.Stack(data.ErrNotFound, err) @@ -349,39 +353,119 @@ func getItemStream( return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx) } - f, ok := e.(fs.File) + f, ok := e.(fs.Directory) if !ok { - return nil, clues.New("requested object is not a file").WithClues(ctx) + return nil, clues.New("requested object is not a directory").WithClues(ctx) } - if bcounter != nil { - bcounter.Count(f.Size()) - } - - r, err := f.Open(ctx) - if err != nil { - return nil, clues.Wrap(err, "opening file").WithClues(ctx) - } - - decodedName, err := decodeElement(f.Name()) - if err != nil { - return nil, clues.Wrap(err, "decoding file name").WithClues(ctx) - } - - return &kopiaDataStream{ - uuid: decodedName, - reader: &restoreStreamReader{ - ReadCloser: r, - expectedVersion: serializationVersion, - }, - size: f.Size() - int64(versionSize), - }, nil + return f, nil } type ByteCounter interface { Count(numBytes int64) } +type restoreCollection struct { + restorePath path.Path + storageDirs map[string]*dirAndItems +} + +type dirAndItems struct { + dir path.Path + items []string +} + +// loadDirsAndItems takes a set of ShortRef -> (directory path, []item names) +// and creates a collection for each tuple in the set. Non-fatal errors are +// accumulated into bus. Any fatal errors will stop processing and return the +// error directly. +// +// All data is loaded from the given snapshot. +func loadDirsAndItems( + ctx context.Context, + snapshotRoot fs.Entry, + bcounter ByteCounter, + toLoad map[string]*restoreCollection, + bus *fault.Bus, +) ([]data.RestoreCollection, error) { + var ( + el = bus.Local() + res = make([]data.RestoreCollection, 0, len(toLoad)) + loadCount = 0 + ) + + for _, col := range toLoad { + if el.Failure() != nil { + return nil, el.Failure() + } + + ictx := clues.Add(ctx, "restore_path", col.restorePath) + + mergeCol := &mergeCollection{fullPath: col.restorePath} + res = append(res, mergeCol) + + for _, dirItems := range col.storageDirs { + if el.Failure() != nil { + return nil, el.Failure() + } + + ictx = clues.Add(ictx, "storage_directory_path", dirItems.dir) + + dir, err := getDir(ictx, dirItems.dir, snapshotRoot) + if err != nil { + el.AddRecoverable(clues.Wrap(err, "loading storage directory"). + WithClues(ictx). + Label(fault.LabelForceNoBackupCreation)) + + continue + } + + dc := &kopiaDataCollection{ + path: col.restorePath, + dir: dir, + counter: bcounter, + expectedVersion: serializationVersion, + } + + if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil { + el.AddRecoverable(clues.Wrap(err, "adding collection to merge collection"). + WithClues(ctx). + Label(fault.LabelForceNoBackupCreation)) + + continue + } + + for _, item := range dirItems.items { + if el.Failure() != nil { + return nil, el.Failure() + } + + err := dc.addStream(ictx, item) + if err != nil { + el.AddRecoverable(clues.Wrap(err, "loading item"). + WithClues(ictx). 
+ Label(fault.LabelForceNoBackupCreation)) + + continue + } + + loadCount++ + if loadCount%1000 == 0 { + logger.Ctx(ctx).Infow( + "loading items from kopia", + "loaded_items", loadCount) + } + } + } + } + + logger.Ctx(ctx).Infow( + "done loading items from kopia", + "loaded_items", loadCount) + + return res, el.Failure() +} + // ProduceRestoreCollections looks up all paths- assuming each is an item declaration, // not a directory- in the snapshot with id snapshotID. The path should be the // full path of the item from the root. Returns the results as a slice of single- @@ -392,7 +476,7 @@ type ByteCounter interface { func (w Wrapper) ProduceRestoreCollections( ctx context.Context, snapshotID string, - paths []path.Path, + paths []path.RestorePaths, bcounter ByteCounter, errs *fault.Bus, ) ([]data.RestoreCollection, error) { @@ -403,57 +487,76 @@ func (w Wrapper) ProduceRestoreCollections( return nil, clues.Stack(errNoRestorePath).WithClues(ctx) } + // Used later on, but less confusing to follow error propagation if we just + // load it here. snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID) if err != nil { - return nil, err + return nil, clues.Wrap(err, "loading snapshot root") } var ( - // Maps short ID of parent path to data collection for that folder. - cols = map[string]*kopiaDataCollection{} - el = errs.Local() + loadCount int + // RestorePath -> []StoragePath directory -> set of items to load from the + // directory. + dirsToItems = map[string]*restoreCollection{} + el = errs.Local() ) - for _, itemPath := range paths { + for _, itemPaths := range paths { if el.Failure() != nil { return nil, el.Failure() } - ictx := clues.Add(ctx, "item_path", itemPath.String()) + // Group things by RestorePath and then StoragePath so we can load multiple + // items from a single directory instance lower down. + ictx := clues.Add( + ctx, + "item_path", itemPaths.StoragePath.String(), + "restore_path", itemPaths.RestorePath.String()) - ds, err := getItemStream(ictx, itemPath, snapshotRoot, bcounter) + parentStoragePath, err := itemPaths.StoragePath.Dir() if err != nil { - el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) - continue - } - - parentPath, err := itemPath.Dir() - if err != nil { - el.AddRecoverable(clues.Wrap(err, "making directory collection"). + el.AddRecoverable(clues.Wrap(err, "getting storage directory path"). WithClues(ictx). Label(fault.LabelForceNoBackupCreation)) continue } - c, ok := cols[parentPath.ShortRef()] - if !ok { - cols[parentPath.ShortRef()] = &kopiaDataCollection{ - path: parentPath, - snapshotRoot: snapshotRoot, - counter: bcounter, + // Find the location this item is restored to. + rc := dirsToItems[itemPaths.RestorePath.ShortRef()] + if rc == nil { + dirsToItems[itemPaths.RestorePath.ShortRef()] = &restoreCollection{ + restorePath: itemPaths.RestorePath, + storageDirs: map[string]*dirAndItems{}, } - c = cols[parentPath.ShortRef()] + rc = dirsToItems[itemPaths.RestorePath.ShortRef()] } - c.streams = append(c.streams, ds) + // Find the collection this item is sourced from. 
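	// (Illustration only, not part of this change: for two items that restore
	// into the same folder but were stored under different directories, the
	// grouping built here ends up shaped like the following, with placeholder
	// names:
	//
	//	dirsToItems = map[string]*restoreCollection{
	//		restorePath.ShortRef(): {
	//			restorePath: restorePath,
	//			storageDirs: map[string]*dirAndItems{
	//				storageDirA.ShortRef(): {dir: storageDirA, items: []string{"itemA"}},
	//				storageDirB.ShortRef(): {dir: storageDirB, items: []string{"itemB"}},
	//			},
	//		},
	//	}
	//
	// loadDirsAndItems then opens each storage directory once and merges both
	// into a single restored collection for restorePath.)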
+ di := rc.storageDirs[parentStoragePath.ShortRef()] + if di == nil { + rc.storageDirs[parentStoragePath.ShortRef()] = &dirAndItems{ + dir: parentStoragePath, + } + di = rc.storageDirs[parentStoragePath.ShortRef()] + } + + di.items = append(di.items, itemPaths.StoragePath.Item()) + + loadCount++ + if loadCount%1000 == 0 { + logger.Ctx(ctx).Infow( + "grouping items to load from kopia", + "group_items", loadCount) + } } - // Can't use the maps package to extract the values because we need to convert - // from *kopiaDataCollection to data.RestoreCollection too. - res := make([]data.RestoreCollection, 0, len(cols)) - for _, c := range cols { - res = append(res, c) + // Now that we've grouped everything, go through and load each directory and + // then load the items from the directory. + res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs) + if err != nil { + return nil, clues.Wrap(err, "loading items") } return res, el.Failure() @@ -512,6 +615,137 @@ func (w Wrapper) FetchPrevSnapshotManifests( } func isErrEntryNotFound(err error) bool { + // Calling Child on a directory may return this. + if errors.Is(err, fs.ErrEntryNotFound) { + return true + } + + // This is returned when walking the hierarchy of a backup. return strings.Contains(err.Error(), "entry not found") && !strings.Contains(err.Error(), "parent is not a directory") } + +func (w Wrapper) RepoMaintenance( + ctx context.Context, + opts repository.Maintenance, +) error { + kopiaSafety, err := translateSafety(opts.Safety) + if err != nil { + return clues.Wrap(err, "identifying safety level") + } + + mode, err := translateMode(opts.Type) + if err != nil { + return clues.Wrap(err, "identifying maintenance mode") + } + + currentOwner := w.c.ClientOptions().UsernameAtHost() + + ctx = clues.Add( + ctx, + "kopia_safety", kopiaSafety, + "kopia_maintenance_mode", mode, + "force", opts.Force, + "current_local_owner", clues.Hide(currentOwner)) + + dr, ok := w.c.Repository.(repo.DirectRepository) + if !ok { + return clues.New("unable to get valid handle to repo").WithClues(ctx) + } + + // Below write session options pulled from kopia's CLI code that runs + // maintenance. + err = repo.DirectWriteSession( + ctx, + dr, + repo.WriteSessionOptions{ + Purpose: "Corso maintenance", + }, + func(ctx context.Context, dw repo.DirectRepositoryWriter) error { + params, err := maintenance.GetParams(ctx, w.c) + if err != nil { + return clues.Wrap(err, "getting maintenance user@host").WithClues(ctx) + } + + // Need to do some fixup here as the user/host may not have been set. + if len(params.Owner) == 0 || (params.Owner != currentOwner && opts.Force) { + observe.Message( + ctx, + "updating maintenance user@host to ", + clues.Hide(currentOwner)) + + if err := w.setMaintenanceParams(ctx, dw, params, currentOwner); err != nil { + return clues.Wrap(err, "updating maintenance parameters"). 
+				WithClues(ctx)
+			}
+		}
+
+		ctx = clues.Add(ctx, "expected_owner", clues.Hide(params.Owner))
+
+		logger.Ctx(ctx).Info("running kopia maintenance")
+
+		err = snapshotmaintenance.Run(ctx, dw, mode, opts.Force, kopiaSafety)
+		if err != nil {
+			return clues.Wrap(err, "running kopia maintenance").WithClues(ctx)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func translateSafety(
+	s repository.MaintenanceSafety,
+) (maintenance.SafetyParameters, error) {
+	switch s {
+	case repository.FullMaintenanceSafety:
+		return maintenance.SafetyFull, nil
+	case repository.NoMaintenanceSafety:
+		return maintenance.SafetyNone, nil
+	default:
+		return maintenance.SafetyParameters{}, clues.New("bad safety value").
+			With("input_safety", s.String())
+	}
+}
+
+func translateMode(t repository.MaintenanceType) (maintenance.Mode, error) {
+	switch t {
+	case repository.CompleteMaintenance:
+		return maintenance.ModeFull, nil
+
+	case repository.MetadataMaintenance:
+		return maintenance.ModeQuick, nil
+
+	default:
+		return maintenance.ModeNone, clues.New("bad maintenance type").
+			With("input_maintenance_type", t.String())
+	}
+}
+
+// setMaintenanceParams sets the user and host for maintenance to the
+// user and host in the kopia config.
+func (w Wrapper) setMaintenanceParams(
+	ctx context.Context,
+	drw repo.DirectRepositoryWriter,
+	p *maintenance.Params,
+	userAtHost string,
+) error {
+	// This will source user/host from the kopia config file or fallback to
+	// fetching the values from the OS.
+	p.Owner = userAtHost
+	// Disable automatic maintenance for now, since the stored user/host can now
+	// match at least one machine and trigger it.
+	p.QuickCycle.Enabled = false
+	p.FullCycle.Enabled = false
+
+	err := maintenance.SetParams(ctx, drw, p)
+	if err != nil {
+		return clues.Wrap(err, "setting maintenance user/host")
+	}
+
+	return nil
+}
diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go
index 684d3fae2..013ffdb2c 100644
--- a/src/internal/kopia/wrapper_test.go
+++ b/src/internal/kopia/wrapper_test.go
@@ -10,6 +10,7 @@ import (
 	"github.com/alcionai/clues"
 	"github.com/google/uuid"
 	"github.com/kopia/kopia/repo"
+	"github.com/kopia/kopia/repo/maintenance"
 	"github.com/kopia/kopia/repo/manifest"
 	"github.com/kopia/kopia/snapshot"
 	"github.com/stretchr/testify/assert"
@@ -17,12 +18,14 @@ import (
 	"github.com/stretchr/testify/suite"
 	"golang.org/x/exp/maps"
+	pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
 	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
 	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/data/mock"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -56,14 +59,12 @@ var (
 	testFileData6 = testFileData
 )
-//revive:disable:context-as-argument
 func testForFiles(
 	t *testing.T,
-	ctx context.Context,
+	ctx context.Context, //revive:disable-line:context-as-argument
 	expected map[string][]byte,
 	collections []data.RestoreCollection,
 ) {
-	//revive:enable:context-as-argument
 	t.Helper()
 	count := 0
@@ -72,7 +73,7 @@ func testForFiles(
 		for s := range c.Items(ctx, fault.New(true)) {
 			count++
-			fullPath, err := c.FullPath().Append(s.UUID(), true)
+			fullPath, err :=
c.FullPath().AppendItem(s.UUID()) require.NoError(t, err, clues.ToCore(err)) expected, ok := expected[fullPath.String()] @@ -104,6 +105,19 @@ func checkSnapshotTags( assert.Equal(t, expectedTags, man.Tags) } +func toRestorePaths(t *testing.T, paths ...path.Path) []path.RestorePaths { + res := make([]path.RestorePaths, 0, len(paths)) + + for _, p := range paths { + dir, err := p.Dir() + require.NoError(t, err, clues.ToCore(err)) + + res = append(res, path.RestorePaths{StoragePath: p, RestorePath: dir}) + } + + return res +} + // --------------- // unit tests // --------------- @@ -143,7 +157,127 @@ func (suite *KopiaUnitSuite) TestCloseWithoutInitDoesNotPanic() { } // --------------- -// integration tests that use kopia +// integration tests that use kopia. +// --------------- +type BasicKopiaIntegrationSuite struct { + tester.Suite +} + +func TestBasicKopiaIntegrationSuite(t *testing.T) { + suite.Run(t, &BasicKopiaIntegrationSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tester.AWSStorageCredEnvs}, + ), + }) +} + +// TestMaintenance checks that different username/hostname pairs will or won't +// cause maintenance to run. It treats kopia maintenance as a black box and +// only checks the returned error. +func (suite *BasicKopiaIntegrationSuite) TestMaintenance_FirstRun_NoChanges() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + k, err := openKopiaRepo(t, ctx) + require.NoError(t, err, clues.ToCore(err)) + + w := &Wrapper{k} + + opts := repository.Maintenance{ + Safety: repository.FullMaintenanceSafety, + Type: repository.MetadataMaintenance, + } + + err = w.RepoMaintenance(ctx, opts) + require.NoError(t, err, clues.ToCore(err)) +} + +func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_NoForce_Fails() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + k, err := openKopiaRepo(t, ctx) + require.NoError(t, err, clues.ToCore(err)) + + w := &Wrapper{k} + + mOpts := repository.Maintenance{ + Safety: repository.FullMaintenanceSafety, + Type: repository.MetadataMaintenance, + } + + // This will set the user. + err = w.RepoMaintenance(ctx, mOpts) + require.NoError(t, err, clues.ToCore(err)) + + err = k.Close(ctx) + require.NoError(t, err, clues.ToCore(err)) + + opts := repository.Options{ + User: "foo", + Host: "bar", + } + + err = k.Connect(ctx, opts) + require.NoError(t, err, clues.ToCore(err)) + + var notOwnedErr maintenance.NotOwnedError + + err = w.RepoMaintenance(ctx, mOpts) + assert.ErrorAs(t, err, ¬OwnedErr, clues.ToCore(err)) +} + +func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_Force_Succeeds() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + k, err := openKopiaRepo(t, ctx) + require.NoError(t, err, clues.ToCore(err)) + + w := &Wrapper{k} + + mOpts := repository.Maintenance{ + Safety: repository.FullMaintenanceSafety, + Type: repository.MetadataMaintenance, + } + + // This will set the user. + err = w.RepoMaintenance(ctx, mOpts) + require.NoError(t, err, clues.ToCore(err)) + + err = k.Close(ctx) + require.NoError(t, err, clues.ToCore(err)) + + opts := repository.Options{ + User: "foo", + Host: "bar", + } + + err = k.Connect(ctx, opts) + require.NoError(t, err, clues.ToCore(err)) + + mOpts.Force = true + + // This will set the user. + err = w.RepoMaintenance(ctx, mOpts) + require.NoError(t, err, clues.ToCore(err)) + + mOpts.Force = false + + // Running without force should succeed now. 
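	// (Aside, for illustration only: the pattern these ownership tests exercise
	// boils down to, with hypothetical values,
	//
	//	opts := repository.Maintenance{
	//		Safety: repository.FullMaintenanceSafety,
	//		Type:   repository.MetadataMaintenance,
	//		Force:  true, // rewrite the stored owner to this client's user@host
	//	}
	//	err := w.RepoMaintenance(ctx, opts)
	//
	// Without Force, a repo whose recorded maintenance owner is a different
	// user@host fails with maintenance.NotOwnedError; with Force, ownership is
	// reassigned to the current client and later runs succeed without it.)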
+ err = w.RepoMaintenance(ctx, mOpts) + require.NoError(t, err, clues.ToCore(err)) +} + +// --------------- +// integration tests that use kopia and initialize a repo // --------------- type KopiaIntegrationSuite struct { tester.Suite @@ -555,10 +689,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { dc1 := exchMock.NewCollection(suite.storePath1, suite.locPath1, 1) dc2 := exchMock.NewCollection(suite.storePath2, suite.locPath2, 1) - fp1, err := suite.storePath1.Append(dc1.Names[0], true) + fp1, err := suite.storePath1.AppendItem(dc1.Names[0]) require.NoError(t, err, clues.ToCore(err)) - fp2, err := suite.storePath2.Append(dc2.Names[0], true) + fp2, err := suite.storePath2.AppendItem(dc2.Names[0]) require.NoError(t, err, clues.ToCore(err)) stats, _, _, err := w.ConsumeBackupCollections( @@ -582,10 +716,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { result, err := w.ProduceRestoreCollections( ctx, string(stats.SnapshotID), - []path.Path{ - fp1, - fp2, - }, + toRestorePaths(t, fp1, fp2), nil, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -620,7 +751,7 @@ func (c mockBackupCollection) FullPath() path.Path { } func (c mockBackupCollection) PreviousPath() path.Path { - return nil + return c.path } func (c mockBackupCollection) LocationPath() *path.Builder { @@ -707,7 +838,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { // 5 file and 2 folder entries. assert.Len(t, deets.Details().Entries, 5+2) - failedPath, err := suite.storePath2.Append(testFileName4, true) + failedPath, err := suite.storePath2.AppendItem(testFileName4) require.NoError(t, err, clues.ToCore(err)) ic := i64counter{} @@ -715,7 +846,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { _, err = suite.w.ProduceRestoreCollections( suite.ctx, string(stats.SnapshotID), - []path.Path{failedPath}, + toRestorePaths(t, failedPath), &ic, fault.New(true)) // Files that had an error shouldn't make a dir entry in kopia. If they do we @@ -856,7 +987,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupSuite() { } for _, item := range filesInfo { - pth, err := item.parentPath.Append(item.name, true) + pth, err := item.parentPath.AppendItem(item.name) require.NoError(suite.T(), err, clues.ToCore(err)) mapKey := item.parentPath.String() @@ -875,8 +1006,13 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { t := suite.T() expectedDirs := 6 expectedFiles := len(suite.filesByPath) + + ls := logger.Settings{ + Level: logger.LLDebug, + Format: logger.LFText, + } //nolint:forbidigo - suite.ctx, _ = logger.SeedLevel(context.Background(), logger.Development) + suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls) c, err := openKopiaRepo(t, suite.ctx) require.NoError(t, err, clues.ToCore(err)) @@ -1034,6 +1170,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { suite.testPath1, 1) c.ColState = data.NotMovedState + c.PrevPath = suite.testPath1 return []data.BackupCollection{c} }, @@ -1055,14 +1192,14 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { prefix = itemPath.ToBuilder().Dir().Dir().String() } - var excluded map[string]map[string]struct{} + excluded := pmMock.NewPrefixMap(nil) if test.excludeItem { - excluded = map[string]map[string]struct{}{ + excluded = pmMock.NewPrefixMap(map[string]map[string]struct{}{ // Add a prefix if needed. 
prefix: { itemPath.Item(): {}, }, - } + }) } stats, _, _, err := suite.w.ConsumeBackupCollections( @@ -1095,9 +1232,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { _, err = suite.w.ProduceRestoreCollections( suite.ctx, string(stats.SnapshotID), - []path.Path{ - suite.files[suite.testPath1.String()][0].itemPath, - }, + toRestorePaths(t, suite.files[suite.testPath1.String()][0].itemPath), &ic, fault.New(true)) test.restoreCheck(t, err, clues.ToCore(err)) @@ -1198,7 +1333,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() { result, err := suite.w.ProduceRestoreCollections( suite.ctx, string(suite.snapshotID), - test.inputPaths, + toRestorePaths(t, test.inputPaths...), &ic, fault.New(true)) test.expectedErr(t, err, clues.ToCore(err)) @@ -1214,14 +1349,201 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() { } } +// TestProduceRestoreCollections_PathChanges tests that having different +// Restore and Storage paths works properly. Having the same Restore and Storage +// paths is tested by TestProduceRestoreCollections. +func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_PathChanges() { + rp1, err := path.Build( + testTenant, + testUser, + path.ExchangeService, + path.EmailCategory, + false, + "corso_restore", "Inbox") + require.NoError(suite.T(), err) + + rp2, err := path.Build( + testTenant, + testUser, + path.ExchangeService, + path.EmailCategory, + false, + "corso_restore", "Archive") + require.NoError(suite.T(), err) + + // Expected items is generated during the test by looking up paths in the + // suite's map of files. + table := []struct { + name string + inputPaths []path.RestorePaths + expectedCollections int + }{ + { + name: "SingleItem", + inputPaths: []path.RestorePaths{ + { + StoragePath: suite.files[suite.testPath1.String()][0].itemPath, + RestorePath: rp1, + }, + }, + expectedCollections: 1, + }, + { + name: "MultipleItemsSameCollection", + inputPaths: []path.RestorePaths{ + { + StoragePath: suite.files[suite.testPath1.String()][0].itemPath, + RestorePath: rp1, + }, + { + StoragePath: suite.files[suite.testPath1.String()][1].itemPath, + RestorePath: rp1, + }, + }, + expectedCollections: 1, + }, + { + name: "MultipleItemsDifferentCollections", + inputPaths: []path.RestorePaths{ + { + StoragePath: suite.files[suite.testPath1.String()][0].itemPath, + RestorePath: rp1, + }, + { + StoragePath: suite.files[suite.testPath2.String()][0].itemPath, + RestorePath: rp2, + }, + }, + expectedCollections: 2, + }, + { + name: "Multiple Items From Different Collections To Same Collection", + inputPaths: []path.RestorePaths{ + { + StoragePath: suite.files[suite.testPath1.String()][0].itemPath, + RestorePath: rp1, + }, + { + StoragePath: suite.files[suite.testPath2.String()][0].itemPath, + RestorePath: rp1, + }, + }, + expectedCollections: 1, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + expected := make(map[string][]byte, len(test.inputPaths)) + + for _, pth := range test.inputPaths { + item, ok := suite.filesByPath[pth.StoragePath.String()] + require.True(t, ok, "getting expected file data") + + itemPath, err := pth.RestorePath.AppendItem(pth.StoragePath.Item()) + require.NoError(t, err, "getting expected item path") + + expected[itemPath.String()] = item.data + } + + ic := i64counter{} + + result, err := suite.w.ProduceRestoreCollections( + suite.ctx, + string(suite.snapshotID), + 
test.inputPaths, + &ic, + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + + assert.Len(t, result, test.expectedCollections) + assert.Less(t, int64(0), ic.i) + testForFiles(t, ctx, expected, result) + }) + } +} + +// TestProduceRestoreCollections_Fetch tests that the Fetch function still works +// properly even with different Restore and Storage paths and items from +// different kopia directories. +func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Fetch() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + rp1, err := path.Build( + testTenant, + testUser, + path.ExchangeService, + path.EmailCategory, + false, + "corso_restore", "Inbox") + require.NoError(suite.T(), err) + + inputPaths := []path.RestorePaths{ + { + StoragePath: suite.files[suite.testPath1.String()][0].itemPath, + RestorePath: rp1, + }, + { + StoragePath: suite.files[suite.testPath2.String()][0].itemPath, + RestorePath: rp1, + }, + } + + // Really only interested in getting the collection so we can call fetch on + // it. + ic := i64counter{} + + result, err := suite.w.ProduceRestoreCollections( + suite.ctx, + string(suite.snapshotID), + inputPaths, + &ic, + fault.New(true)) + require.NoError(t, err, "getting collection", clues.ToCore(err)) + require.Len(t, result, 1) + + // Item from first kopia directory. + f := suite.files[suite.testPath1.String()][0] + + item, err := result[0].Fetch(ctx, f.itemPath.Item()) + require.NoError(t, err, "fetching file", clues.ToCore(err)) + + r := item.ToReader() + + buf, err := io.ReadAll(r) + require.NoError(t, err, "reading file data", clues.ToCore(err)) + + assert.Equal(t, f.data, buf) + + // Item from second kopia directory. + f = suite.files[suite.testPath2.String()][0] + + item, err = result[0].Fetch(ctx, f.itemPath.Item()) + require.NoError(t, err, "fetching file", clues.ToCore(err)) + + r = item.ToReader() + + buf, err = io.ReadAll(r) + require.NoError(t, err, "reading file data", clues.ToCore(err)) + + assert.Equal(t, f.data, buf) +} + func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Errors() { - itemPath, err := suite.testPath1.Append(testFileName, true) + itemPath, err := suite.testPath1.AppendItem(testFileName) require.NoError(suite.T(), err, clues.ToCore(err)) table := []struct { name string snapshotID string - paths []path.Path + paths []path.RestorePaths }{ { "NilPaths", @@ -1231,12 +1553,12 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Erro { "EmptyPaths", string(suite.snapshotID), - []path.Path{}, + []path.RestorePaths{}, }, { "NoSnapshot", "foo", - []path.Path{itemPath}, + toRestorePaths(suite.T(), itemPath), }, } @@ -1269,7 +1591,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestDeleteSnapshot() { c, err := suite.w.ProduceRestoreCollections( suite.ctx, string(suite.snapshotID), - []path.Path{itemPath}, + toRestorePaths(t, itemPath), &ic, fault.New(true)) assert.Error(t, err, "snapshot should be deleted", clues.ToCore(err)) diff --git a/src/internal/model/model.go b/src/internal/model/model.go index 41b118a73..b33762545 100644 --- a/src/internal/model/model.go +++ b/src/internal/model/model.go @@ -59,9 +59,9 @@ type BaseModel struct { // to refer to this one. This field may change if the model is updated. This // field should be treated as read-only by users. ModelStoreID manifest.ID `json:"-"` - // Version is a version number that can help track changes across models. 
+ // ModelVersion is a version number that can help track changes across models. // TODO(ashmrtn): Reference version control documentation. - Version int `json:"-"` + ModelVersion int `json:"-"` // Tags associated with this model in the store to facilitate lookup. Tags in // the struct are not serialized directly into the stored model, but are part // of the metadata for the model. diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index ac185d3d0..033fca8bb 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -7,8 +7,10 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" - "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common/crash" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/events" @@ -18,6 +20,7 @@ import ( "github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/internal/stats" "github.com/alcionai/corso/src/internal/streamstore" + "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" @@ -33,12 +36,19 @@ import ( type BackupOperation struct { operation - ResourceOwner common.IDNamer + ResourceOwner idname.Provider Results BackupResults `json:"results"` Selectors selectors.Selector `json:"selectors"` Version string `json:"version"` + // backupVersion ONLY controls the value that gets persisted to the + // backup model after operation. It does NOT modify the operation behavior + // to match the version. Its inclusion here is, unfortunately, purely to + // facilitate integration testing that requires a certain backup version, and + // should be removed when we have a more controlled workaround. + backupVersion int + account account.Account bp inject.BackupProducer @@ -62,7 +72,7 @@ func NewBackupOperation( bp inject.BackupProducer, acct account.Account, selector selectors.Selector, - owner common.IDNamer, + owner idname.Provider, bus events.Eventer, ) (BackupOperation, error) { op := BackupOperation{ @@ -70,6 +80,7 @@ func NewBackupOperation( ResourceOwner: owner, Selectors: selector, Version: "v0", + backupVersion: version.Backup, account: acct, incremental: useIncrementalBackup(selector, opts), bp: bp, @@ -115,7 +126,7 @@ type backupStats struct { // Run begins a synchronous backup operation. 
func (op *BackupOperation) Run(ctx context.Context) (err error) { defer func() { - if crErr := crash.Recovery(ctx, recover()); crErr != nil { + if crErr := crash.Recovery(ctx, recover(), "backup"); crErr != nil { err = crErr } }() @@ -159,6 +170,22 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { events.BackupID: op.Results.BackupID, }) + defer func() { + op.bus.Event( + ctx, + events.BackupEnd, + map[string]any{ + events.BackupID: op.Results.BackupID, + events.DataStored: op.Results.BytesUploaded, + events.Duration: op.Results.CompletedAt.Sub(op.Results.StartedAt), + events.EndTime: dttm.Format(op.Results.CompletedAt), + events.Resources: op.Results.ResourceOwners, + events.Service: op.Selectors.PathService().String(), + events.StartTime: dttm.Format(op.Results.StartedAt), + events.Status: op.Status.String(), + }) + }() + // ----- // Execution // ----- @@ -172,9 +199,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { op.Results.BackupID) if err != nil { // No return here! We continue down to persistResults, even in case of failure. - logger.Ctx(ctx). - With("err", err). - Errorw("running backup", clues.InErr(err).Slice()...) + logger.CtxErr(ctx, err).Error("running backup") op.Errors.Fail(clues.Wrap(err, "running backup")) } @@ -210,6 +235,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { sstore, opStats.k.SnapshotID, op.Results.BackupID, + op.backupVersion, deets.Details()) if err != nil { op.Errors.Fail(clues.Wrap(err, "persisting backup")) @@ -230,11 +256,15 @@ func (op *BackupOperation) do( backupID model.StableID, ) (*details.Builder, error) { var ( - reasons = selectorToReasons(op.Selectors, false) - fallbackReasons = makeFallbackReasons(op.Selectors) + reasons = selectorToReasons(op.Selectors, false) + fallbackReasons = makeFallbackReasons(op.Selectors) + lastBackupVersion = version.NoBackup ) - logger.Ctx(ctx).With("selectors", op.Selectors).Info("backing up selection") + logger.Ctx(ctx).With( + "control_options", op.Options, + "selectors", op.Selectors). + Info("backing up selection") // should always be 1, since backups are 1:1 with resourceOwners. opStats.resourceCount = 1 @@ -250,12 +280,20 @@ func (op *BackupOperation) do( return nil, clues.Wrap(err, "producing manifests and metadata") } - cs, excludes, err := produceBackupDataCollections( + if canUseMetaData { + _, lastBackupVersion, err = lastCompleteBackups(ctx, op.store, mans) + if err != nil { + return nil, clues.Wrap(err, "retrieving prior backups") + } + } + + cs, ssmb, err := produceBackupDataCollections( ctx, op.bp, op.ResourceOwner, op.Selectors, mdColls, + lastBackupVersion, op.Options, op.Errors) if err != nil { @@ -271,7 +309,7 @@ func (op *BackupOperation) do( reasons, mans, cs, - excludes, + ssmb, backupID, op.incremental && canUseMetaData, op.Errors) @@ -312,14 +350,7 @@ func makeFallbackReasons(sel selectors.Selector) []kopia.Reason { // checker to see if conditions are correct for incremental backup behavior such as // retrieving metadata like delta tokens and previous paths. 
func useIncrementalBackup(sel selectors.Selector, opts control.Options) bool { - enabled := !opts.ToggleFeatures.DisableIncrementals - - if sel.Service == selectors.ServiceExchange || - sel.Service == selectors.ServiceOneDrive { - return enabled - } - - return false + return !opts.ToggleFeatures.DisableIncrementals } // --------------------------------------------------------------------------- @@ -330,12 +361,13 @@ func useIncrementalBackup(sel selectors.Selector, opts control.Options) bool { func produceBackupDataCollections( ctx context.Context, bp inject.BackupProducer, - resourceOwner common.IDNamer, + resourceOwner idname.Provider, sel selectors.Selector, metadata []data.RestoreCollection, + lastBackupVersion int, ctrlOpts control.Options, errs *fault.Bus, -) ([]data.BackupCollection, map[string]map[string]struct{}, error) { +) ([]data.BackupCollection, prefixmatcher.StringSetReader, error) { complete, closer := observe.MessageWithCompletion(ctx, "Discovering items to backup") defer func() { complete <- struct{}{} @@ -343,7 +375,7 @@ func produceBackupDataCollections( closer() }() - return bp.ProduceBackupCollections(ctx, resourceOwner, sel, metadata, ctrlOpts, errs) + return bp.ProduceBackupCollections(ctx, resourceOwner, sel, metadata, lastBackupVersion, ctrlOpts, errs) } // --------------------------------------------------------------------------- @@ -407,7 +439,7 @@ func consumeBackupCollections( reasons []kopia.Reason, mans []*kopia.ManifestEntry, cs []data.BackupCollection, - excludes map[string]map[string]struct{}, + pmr prefixmatcher.StringSetReader, backupID model.StableID, isIncremental bool, errs *fault.Bus, @@ -480,7 +512,7 @@ func consumeBackupCollections( ctx, bases, cs, - excludes, + pmr, tags, isIncremental, errs) @@ -494,11 +526,16 @@ func consumeBackupCollections( "kopia_ignored_errors", kopiaStats.IgnoredErrorCount) } - if kopiaStats.ErrorCount > 0 || - (kopiaStats.IgnoredErrorCount > kopiaStats.ExpectedIgnoredErrorCount) { - err = clues.New("building kopia snapshot").With( - "kopia_errors", kopiaStats.ErrorCount, - "kopia_ignored_errors", kopiaStats.IgnoredErrorCount) + ctx = clues.Add( + ctx, + "kopia_errors", kopiaStats.ErrorCount, + "kopia_ignored_errors", kopiaStats.IgnoredErrorCount, + "kopia_expected_ignored_errors", kopiaStats.ExpectedIgnoredErrorCount) + + if kopiaStats.ErrorCount > 0 { + err = clues.New("building kopia snapshot").WithClues(ctx) + } else if kopiaStats.IgnoredErrorCount > kopiaStats.ExpectedIgnoredErrorCount { + err = clues.New("downloading items for persistence").WithClues(ctx) } return kopiaStats, deets, itemsSourcedFromBase, err @@ -523,7 +560,7 @@ func matchesReason(reasons []kopia.Reason, p path.Path) bool { // 4. any errors encountered func getNewPathRefs( dataFromBackup kopia.DetailsMergeInfoer, - entry *details.DetailsEntry, + entry *details.Entry, repoRef path.Path, backupVersion int, ) (path.Path, *path.Builder, bool, error) { @@ -582,6 +619,56 @@ func getNewPathRefs( return newPath, newLoc, updated, nil } +func lastCompleteBackups( + ctx context.Context, + ms *store.Wrapper, + mans []*kopia.ManifestEntry, +) (map[string]*backup.Backup, int, error) { + var ( + oldestVersion = version.NoBackup + result = map[string]*backup.Backup{} + ) + + if len(mans) == 0 { + return result, -1, nil + } + + for _, man := range mans { + // For now skip snapshots that aren't complete. We will need to revisit this + // when we tackle restartability. 
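	// (Aside, illustration only: the second return value feeds
	// produceBackupDataCollections as lastBackupVersion. A sketch of the
	// bookkeeping, assuming two complete bases at backup versions 3 and 5 and
	// that version.NoBackup is the -1 sentinel used in the comparisons below:
	//
	//	oldestVersion := version.NoBackup
	//	for _, v := range []int{5, 3} { // versions of the complete bases
	//		if oldestVersion == version.NoBackup || v < oldestVersion {
	//			oldestVersion = v
	//		}
	//	}
	//	// oldestVersion == 3
	//
	// so downstream producers may only rely on features present in the oldest
	// complete base.)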
+ if len(man.IncompleteReason) > 0 { + continue + } + + var ( + mctx = clues.Add(ctx, "base_manifest_id", man.ID) + reasons = man.Reasons + ) + + bID, ok := man.GetTag(kopia.TagBackupID) + if !ok { + return result, oldestVersion, clues.New("no backup ID in snapshot manifest").WithClues(mctx) + } + + mctx = clues.Add(mctx, "base_manifest_backup_id", bID) + + bup, err := getBackupFromID(mctx, model.StableID(bID), ms) + if err != nil { + return result, oldestVersion, err + } + + for _, r := range reasons { + result[r.Key()] = bup + } + + if oldestVersion == -1 || bup.Version < oldestVersion { + oldestVersion = bup.Version + } + } + + return result, oldestVersion, nil +} + func mergeDetails( ctx context.Context, ms *store.Wrapper, @@ -624,7 +711,7 @@ func mergeDetails( detailsStore, errs) if err != nil { - return clues.New("fetching base details for backup").WithClues(mctx) + return clues.New("fetching base details for backup") } for _, entry := range baseDeets.Items() { @@ -690,12 +777,14 @@ func mergeDetails( "base_item_count_added", manifestAddedEntries) } - if addedEntries != dataFromBackup.ItemsToMerge() { + checkCount := dataFromBackup.ItemsToMerge() + + if addedEntries != checkCount { return clues.New("incomplete migration of backup details"). WithClues(ctx). With( "item_count", addedEntries, - "expected_item_count", dataFromBackup.ItemsToMerge()) + "expected_item_count", checkCount) } return nil @@ -746,6 +835,7 @@ func (op *BackupOperation) createBackupModels( sscw streamstore.CollectorWriter, snapID string, backupID model.StableID, + backupVersion int, deets *details.Details, ) error { ctx = clues.Add(ctx, "snapshot_id", snapID, "backup_id", backupID) @@ -780,6 +870,7 @@ func (op *BackupOperation) createBackupModels( b := backup.New( snapID, ssid, op.Status.String(), + backupVersion, backupID, op.Selectors, op.ResourceOwner.ID(), @@ -794,19 +885,5 @@ func (op *BackupOperation) createBackupModels( return clues.Wrap(err, "creating backup model").WithClues(ctx) } - op.bus.Event( - ctx, - events.BackupEnd, - map[string]any{ - events.BackupID: b.ID, - events.DataStored: op.Results.BytesUploaded, - events.Duration: op.Results.CompletedAt.Sub(op.Results.StartedAt), - events.EndTime: common.FormatTime(op.Results.CompletedAt), - events.Resources: op.Results.ResourceOwners, - events.Service: op.Selectors.PathService().String(), - events.StartTime: common.FormatTime(op.Results.StartedAt), - events.Status: op.Status.String(), - }) - return nil } diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index e322bfebe..a658a9267 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -3,12 +3,13 @@ package operations import ( "context" "fmt" + "strings" "testing" "time" "github.com/alcionai/clues" "github.com/google/uuid" - "github.com/microsoftgraph/msgraph-sdk-go/drive" + "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/users" "github.com/stretchr/testify/assert" @@ -16,15 +17,18 @@ import ( "github.com/stretchr/testify/suite" "golang.org/x/exp/maps" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/internal/common/idname" + inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector" 
"github.com/alcionai/corso/src/internal/connector/exchange" - "github.com/alcionai/corso/src/internal/connector/exchange/api" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/mock" "github.com/alcionai/corso/src/internal/connector/onedrive" + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" + "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/events" @@ -32,19 +36,26 @@ import ( "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/operations/inject" + "github.com/alcionai/corso/src/internal/streamstore" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + deeTD "github.com/alcionai/corso/src/pkg/backup/details/testdata" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" - "github.com/alcionai/corso/src/pkg/selectors/testdata" + selTD "github.com/alcionai/corso/src/pkg/selectors/testdata" + "github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/store" ) +// Does not use the tester.DefaultTestRestoreDestination syntax as some of these +// items are created directly, not as a result of restoration, and we want to ensure +// they get clearly selected without accidental overlap. const incrementalsDestContainerPrefix = "incrementals_ci_" // --------------------------------------------------------------------------- @@ -54,23 +65,23 @@ const incrementalsDestContainerPrefix = "incrementals_ci_" // prepNewTestBackupOp generates all clients required to run a backup operation, // returning both a backup operation created with those clients, as well as // the clients themselves. -// -//revive:disable:context-as-argument func prepNewTestBackupOp( t *testing.T, - ctx context.Context, + ctx context.Context, //revive:disable-line:context-as-argument bus events.Eventer, sel selectors.Selector, featureToggles control.Toggles, + backupVersion int, ) ( BackupOperation, account.Account, *kopia.Wrapper, *kopia.ModelStore, + streamstore.Streamer, *connector.GraphConnector, + selectors.Selector, func(), ) { - //revive:enable:context-as-argument var ( acct = tester.NewM365Account(t) // need to initialize the repository before we can test connecting to it. 
@@ -78,7 +89,7 @@ func prepNewTestBackupOp( k = kopia.NewConn(st) ) - err := k.Initialize(ctx) + err := k.Initialize(ctx, repository.Options{}) require.NoError(t, err, clues.ToCore(err)) // kopiaRef comes with a count of 1 and Wrapper bumps it again so safe @@ -113,35 +124,21 @@ func prepNewTestBackupOp( connectorResource = connector.Sites } - gc, err := connector.NewGraphConnector( - ctx, - acct, - connectorResource, - fault.New(true)) - if !assert.NoError(t, err, clues.ToCore(err)) { - closer() - t.FailNow() - } - - id, name, err := gc.PopulateOwnerIDAndNamesFrom(ctx, sel.DiscreteOwner, nil) - require.NoError(t, err, clues.ToCore(err)) - - sel.SetDiscreteOwnerIDName(id, name) - + gc, sel := GCWithSelector(t, ctx, acct, connectorResource, sel, nil, closer) bo := newTestBackupOp(t, ctx, kw, ms, gc, acct, sel, bus, featureToggles, closer) - return bo, acct, kw, ms, gc, closer + ss := streamstore.NewStreamer(kw, acct.ID(), sel.PathService()) + + return bo, acct, kw, ms, ss, gc, sel, closer } // newTestBackupOp accepts the clients required to compose a backup operation, plus // any other metadata, and uses them to generate a new backup operation. This // allows backup chains to utilize the same temp directory and configuration // details. -// -//revive:disable:context-as-argument func newTestBackupOp( t *testing.T, - ctx context.Context, + ctx context.Context, //revive:disable-line:context-as-argument kw *kopia.Wrapper, ms *kopia.ModelStore, gc *connector.GraphConnector, @@ -151,13 +148,13 @@ func newTestBackupOp( featureToggles control.Toggles, closer func(), ) BackupOperation { - //revive:enable:context-as-argument var ( sw = store.NewKopiaStore(ms) - opts = control.Options{} + opts = control.Defaults() ) opts.ToggleFeatures = featureToggles + gc.IDNameLookup = idname.NewCache(map[string]string{sel.ID(): sel.Name()}) bo, err := NewBackupOperation(ctx, opts, kw, sw, gc, acct, sel, sel, bus) if !assert.NoError(t, err, clues.ToCore(err)) { @@ -168,15 +165,13 @@ func newTestBackupOp( return bo } -//revive:disable:context-as-argument func runAndCheckBackup( t *testing.T, - ctx context.Context, + ctx context.Context, //revive:disable-line:context-as-argument bo *BackupOperation, mb *evmock.Bus, acceptNoData bool, ) { - //revive:enable:context-as-argument err := bo.Run(ctx) require.NoError(t, err, clues.ToCore(err)) require.NotEmpty(t, bo.Results, "the backup had non-zero results") @@ -209,17 +204,15 @@ func runAndCheckBackup( bo.Results.BackupID, "backupID pre-declaration") } -//revive:disable:context-as-argument func checkBackupIsInManifests( t *testing.T, - ctx context.Context, + ctx context.Context, //revive:disable-line:context-as-argument kw *kopia.Wrapper, bo *BackupOperation, sel selectors.Selector, resourceOwner string, categories ...path.CategoryType, ) { - //revive:enable:context-as-argument for _, category := range categories { t.Run(category.String(), func(t *testing.T) { var ( @@ -254,18 +247,16 @@ func checkBackupIsInManifests( } } -//revive:disable:context-as-argument func checkMetadataFilesExist( t *testing.T, - ctx context.Context, + ctx context.Context, //revive:disable-line:context-as-argument backupID model.StableID, kw *kopia.Wrapper, ms *kopia.ModelStore, - tenant, user string, + tenant, resourceOwner string, service path.ServiceType, filesByCat map[path.CategoryType][]string, ) { - //revive:enable:context-as-argument for category, files := range filesByCat { t.Run(category.String(), func(t *testing.T) { bup := &backup.Backup{} @@ -275,13 +266,13 @@ func 
checkMetadataFilesExist( return } - paths := []path.Path{} + paths := []path.RestorePaths{} pathsByRef := map[string][]string{} for _, fName := range files { p, err := path.Builder{}. Append(fName). - ToServiceCategoryMetadataPath(tenant, user, service, category, true) + ToServiceCategoryMetadataPath(tenant, resourceOwner, service, category, true) if !assert.NoError(t, err, "bad metadata path", clues.ToCore(err)) { continue } @@ -291,11 +282,18 @@ func checkMetadataFilesExist( continue } - paths = append(paths, p) + paths = append( + paths, + path.RestorePaths{StoragePath: p, RestorePath: dir}) pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName) } - cols, err := kw.ProduceRestoreCollections(ctx, bup.SnapshotID, paths, nil, fault.New(true)) + cols, err := kw.ProduceRestoreCollections( + ctx, + bup.SnapshotID, + paths, + nil, + fault.New(true)) assert.NoError(t, err, clues.ToCore(err)) for _, col := range cols { @@ -341,27 +339,25 @@ func checkMetadataFilesExist( // the callback provider can use them, or not, as wanted. type dataBuilderFunc func(id, timeStamp, subject, body string) []byte -//revive:disable:context-as-argument func generateContainerOfItems( t *testing.T, - ctx context.Context, + ctx context.Context, //revive:disable-line:context-as-argument gc *connector.GraphConnector, service path.ServiceType, acct account.Account, cat path.CategoryType, sel selectors.Selector, - tenantID, userID, driveID, destFldr string, + tenantID, resourceOwner, driveID, destFldr string, howManyItems int, backupVersion int, dbf dataBuilderFunc, ) *details.Details { - //revive:enable:context-as-argument t.Helper() items := make([]incrementalItem, 0, howManyItems) for i := 0; i < howManyItems; i++ { - id, d := generateItemData(t, cat, userID, dbf) + id, d := generateItemData(t, cat, resourceOwner, dbf) items = append(items, incrementalItem{ name: id, @@ -370,8 +366,10 @@ func generateContainerOfItems( } pathFolders := []string{destFldr} - if service == path.OneDriveService { - pathFolders = []string{"drives", driveID, "root:", destFldr} + + switch service { + case path.OneDriveService, path.SharePointService: + pathFolders = []string{odConsts.DrivesPathDir, driveID, odConsts.RootPathDir, destFldr} } collections := []incrementalCollection{{ @@ -380,23 +378,26 @@ func generateContainerOfItems( items: items, }} - dest := control.DefaultRestoreDestination(common.SimpleTimeTesting) + dest := control.DefaultRestoreDestination(dttm.SafeForTesting) dest.ContainerName = destFldr dataColls := buildCollections( t, service, - tenantID, userID, + tenantID, resourceOwner, dest, collections) + opts := control.Defaults() + opts.RestorePermissions = true + deets, err := gc.ConsumeRestoreCollections( ctx, backupVersion, acct, sel, dest, - control.Options{RestorePermissions: true}, + opts, dataColls, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -415,8 +416,8 @@ func generateItemData( dbf dataBuilderFunc, ) (string, []byte) { var ( - now = common.Now() - nowLegacy = common.FormatLegacyTime(time.Now()) + now = dttm.Now() + nowLegacy = dttm.FormatToLegacy(time.Now()) id = uuid.NewString() subject = "incr_test " + now[:16] + " - " + id[:8] body = "incr_test " + category.String() + " generation for " + resourceOwner + " at " + now + " - " + id @@ -473,7 +474,7 @@ func buildCollections( func toDataLayerPath( t *testing.T, service path.ServiceType, - tenant, user string, + tenant, resourceOwner string, category path.CategoryType, elements []string, isItem bool, @@ -488,9 +489,11 @@ func 
toDataLayerPath( switch service { case path.ExchangeService: - p, err = pb.ToDataLayerExchangePathForCategory(tenant, user, category, isItem) + p, err = pb.ToDataLayerExchangePathForCategory(tenant, resourceOwner, category, isItem) case path.OneDriveService: - p, err = pb.ToDataLayerOneDrivePath(tenant, user, isItem) + p, err = pb.ToDataLayerOneDrivePath(tenant, resourceOwner, isItem) + case path.SharePointService: + p, err = pb.ToDataLayerSharePointPath(tenant, resourceOwner, category, isItem) default: err = clues.New(fmt.Sprintf("unknown service: %s", service)) } @@ -500,29 +503,6 @@ func toDataLayerPath( return p } -func mustGetDefaultDriveID( - t *testing.T, - ctx context.Context, //revive:disable-line:context-as-argument - service graph.Servicer, - userID string, -) string { - d, err := service.Client().UsersById(userID).Drive().Get(ctx, nil) - if err != nil { - err = graph.Wrap( - ctx, - err, - "retrieving default user drive"). - With("user", userID) - } - - require.Nil(t, clues.ToCore(err)) - - id := ptr.Val(d.GetId()) - require.NotEmpty(t, id, "drive ID not set") - - return id -} - // --------------------------------------------------------------------------- // integration tests // --------------------------------------------------------------------------- @@ -546,14 +526,16 @@ func (suite *BackupOpIntegrationSuite) SetupSuite() { } func (suite *BackupOpIntegrationSuite) TestNewBackupOperation() { - kw := &kopia.Wrapper{} - sw := &store.Wrapper{} - gc := &mock.GraphConnector{} - acct := tester.NewM365Account(suite.T()) + var ( + kw = &kopia.Wrapper{} + sw = &store.Wrapper{} + gc = &mock.GraphConnector{} + acct = tester.NewM365Account(suite.T()) + opts = control.Defaults() + ) table := []struct { name string - opts control.Options kw *kopia.Wrapper sw *store.Wrapper bp inject.BackupProducer @@ -561,10 +543,10 @@ func (suite *BackupOpIntegrationSuite) TestNewBackupOperation() { targets []string errCheck assert.ErrorAssertionFunc }{ - {"good", control.Options{}, kw, sw, gc, acct, nil, assert.NoError}, - {"missing kopia", control.Options{}, nil, sw, gc, acct, nil, assert.Error}, - {"missing modelstore", control.Options{}, kw, nil, gc, acct, nil, assert.Error}, - {"missing backup producer", control.Options{}, kw, sw, nil, acct, nil, assert.Error}, + {"good", kw, sw, gc, acct, nil, assert.NoError}, + {"missing kopia", nil, sw, gc, acct, nil, assert.Error}, + {"missing modelstore", kw, nil, gc, acct, nil, assert.Error}, + {"missing backup producer", kw, sw, nil, acct, nil, assert.Error}, } for _, test := range table { suite.Run(test.name, func() { @@ -575,7 +557,7 @@ func (suite *BackupOpIntegrationSuite) TestNewBackupOperation() { _, err := NewBackupOperation( ctx, - test.opts, + opts, test.kw, test.sw, test.bp, @@ -598,50 +580,41 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() { ctx, flush := tester.NewContext() defer flush() - owners := []string{suite.user} - tests := []struct { - name string - selector func() *selectors.ExchangeBackup - resourceOwner string - category path.CategoryType - metadataFiles []string - runIncremental bool + name string + selector func() *selectors.ExchangeBackup + category path.CategoryType + metadataFiles []string }{ { name: "Mail", selector: func() *selectors.ExchangeBackup { - sel := selectors.NewExchangeBackup(owners) + sel := selectors.NewExchangeBackup([]string{suite.user}) sel.Include(sel.MailFolders([]string{exchange.DefaultMailFolder}, selectors.PrefixMatch())) sel.DiscreteOwner = suite.user return sel }, - resourceOwner: 
suite.user, - category: path.EmailCategory, - metadataFiles: exchange.MetadataFileNames(path.EmailCategory), - runIncremental: true, + category: path.EmailCategory, + metadataFiles: exchange.MetadataFileNames(path.EmailCategory), }, { name: "Contacts", selector: func() *selectors.ExchangeBackup { - sel := selectors.NewExchangeBackup(owners) + sel := selectors.NewExchangeBackup([]string{suite.user}) sel.Include(sel.ContactFolders([]string{exchange.DefaultContactFolder}, selectors.PrefixMatch())) return sel }, - resourceOwner: suite.user, - category: path.ContactsCategory, - metadataFiles: exchange.MetadataFileNames(path.ContactsCategory), - runIncremental: true, + category: path.ContactsCategory, + metadataFiles: exchange.MetadataFileNames(path.ContactsCategory), }, { name: "Calendar Events", selector: func() *selectors.ExchangeBackup { - sel := selectors.NewExchangeBackup(owners) + sel := selectors.NewExchangeBackup([]string{suite.user}) sel.Include(sel.EventCalendars([]string{exchange.DefaultCalendar}, selectors.PrefixMatch())) return sel }, - resourceOwner: suite.user, category: path.EventsCategory, metadataFiles: exchange.MetadataFileNames(path.EventsCategory), }, @@ -649,21 +622,24 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() { for _, test := range tests { suite.Run(test.name, func() { var ( - t = suite.T() - mb = evmock.NewBus() - sel = test.selector().Selector - ffs = control.Toggles{} + t = suite.T() + mb = evmock.NewBus() + sel = test.selector().Selector + ffs = control.Toggles{} + whatSet = deeTD.CategoryFromRepoRef ) - bo, acct, kw, ms, gc, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs) + bo, acct, kw, ms, ss, gc, sel, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs, version.Backup) defer closer() + userID := sel.ID() + m365, err := acct.M365Config() require.NoError(t, err, clues.ToCore(err)) // run the tests runAndCheckBackup(t, ctx, &bo, mb, false) - checkBackupIsInManifests(t, ctx, kw, &bo, sel, test.resourceOwner, test.category) + checkBackupIsInManifests(t, ctx, kw, &bo, sel, userID, test.category) checkMetadataFilesExist( t, ctx, @@ -671,13 +647,21 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() { kw, ms, m365.AzureTenantID, - test.resourceOwner, + userID, path.ExchangeService, map[path.CategoryType][]string{test.category: test.metadataFiles}) - if !test.runIncremental { - return - } + _, expectDeets := deeTD.GetDeetsInBackup( + t, + ctx, + bo.Results.BackupID, + acct.ID(), + userID, + path.ExchangeService, + whatSet, + ms, + ss) + deeTD.CheckBackupDetails(t, ctx, bo.Results.BackupID, whatSet, ms, ss, expectDeets, false) // Basic, happy path incremental test. No changes are dictated or expected. // This only tests that an incremental backup is runnable at all, and that it @@ -688,7 +672,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() { ) runAndCheckBackup(t, ctx, &incBO, incMB, true) - checkBackupIsInManifests(t, ctx, kw, &incBO, sel, test.resourceOwner, test.category) + checkBackupIsInManifests(t, ctx, kw, &incBO, sel, userID, test.category) checkMetadataFilesExist( t, ctx, @@ -696,9 +680,18 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() { kw, ms, m365.AzureTenantID, - test.resourceOwner, + userID, path.ExchangeService, map[path.CategoryType][]string{test.category: test.metadataFiles}) + deeTD.CheckBackupDetails( + t, + ctx, + incBO.Results.BackupID, + whatSet, + ms, + ss, + expectDeets, + false) // do some additional checks to ensure the incremental dealt with fewer items. 
assert.Greater(t, bo.Results.ItemsWritten, incBO.Results.ItemsWritten, "incremental items written") @@ -717,9 +710,15 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() { } } -// TestBackup_Run ensures that Integration Testing works -// for the following scopes: Contacts, Events, and Mail -func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { +func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalExchange() { + testExchangeContinuousBackups(suite, control.Toggles{}) +} + +func (suite *BackupOpIntegrationSuite) TestBackup_Run_nonIncrementalExchange() { + testExchangeContinuousBackups(suite, control.Toggles{DisableDelta: true}) +} + +func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles control.Toggles) { ctx, flush := tester.NewContext() defer flush() @@ -728,10 +727,9 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { var ( t = suite.T() acct = tester.NewM365Account(t) - ffs = control.Toggles{} mb = evmock.NewBus() - now = common.Now() - owners = []string{suite.user} + now = dttm.Now() + service = path.ExchangeService categories = map[path.CategoryType][]string{ path.EmailCategory: exchange.MetadataFileNames(path.EmailCategory), path.ContactsCategory: exchange.MetadataFileNames(path.ContactsCategory), @@ -742,16 +740,26 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { container2 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 2, now) container3 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 3, now) containerRename = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 4, now) + + // container3 and containerRename don't exist yet. Those will get created + // later on during the tests. Putting their identifiers into the selector + // at this point is harmless. + containers = []string{container1, container2, container3, containerRename} + sel = selectors.NewExchangeBackup([]string{suite.user}) + whatSet = deeTD.CategoryFromRepoRef ) - m365, err := acct.M365Config() - require.NoError(t, err, clues.ToCore(err)) + gc, sels := GCWithSelector(t, ctx, acct, connector.Users, sel.Selector, nil, nil) + sel.DiscreteOwner = sels.ID() + sel.DiscreteOwnerName = sels.Name() - gc, err := connector.NewGraphConnector( - ctx, - acct, - connector.Users, - fault.New(true)) + uidn := inMock.NewProvider(sels.ID(), sels.Name()) + + sel.Include( + sel.MailFolders(containers, selectors.PrefixMatch()), + sel.ContactFolders(containers, selectors.PrefixMatch())) + + m365, err := acct.M365Config() require.NoError(t, err, clues.ToCore(err)) ac, err := api.NewClient(m365) @@ -765,7 +773,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { // container into another generates a delta for both addition and deletion. 
type contDeets struct { containerID string - deets *details.Details + locRef string + itemRefs []string // cached for populating expected deets, otherwise not used } mailDBF := func(id, timeStamp, subject, body string) []byte { @@ -782,8 +791,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { given+" "+sur, sur+", "+given, given, mid, sur, - "123-456-7890", - ) + "123-456-7890") } eventDBF := func(id, timeStamp, subject, body string) []byte { @@ -824,64 +832,120 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { // populate initial test data for category, gen := range dataset { for destName := range gen.dests { + // TODO: the details.Builder returned by restore can contain entries with + // incorrect information. non-representative repo-refs and the like. Until + // that gets fixed, we can't consume that info for testing. deets := generateContainerOfItems( t, ctx, gc, - path.ExchangeService, + service, acct, category, - selectors.NewExchangeRestore(owners).Selector, - m365.AzureTenantID, suite.user, "", destName, + selectors.NewExchangeRestore([]string{uidn.ID()}).Selector, + m365.AzureTenantID, uidn.ID(), "", destName, 2, version.Backup, gen.dbf) - dataset[category].dests[destName] = contDeets{"", deets} + itemRefs := []string{} + + for _, ent := range deets.Entries { + if ent.Exchange == nil || ent.Folder != nil { + continue + } + + if len(ent.ItemRef) > 0 { + itemRefs = append(itemRefs, ent.ItemRef) + } + } + + // save the item ids for building expectedDeets later on + cd := dataset[category].dests[destName] + cd.itemRefs = itemRefs + dataset[category].dests[destName] = cd } } - // verify test data was populated, and track it for comparisons - for category, gen := range dataset { - ss := selectors.Selector{}.SetDiscreteOwnerIDName(suite.user, suite.user) - - qp := graph.QueryParams{ - Category: category, - ResourceOwner: ss, - Credentials: m365, - } - cr, err := exchange.PopulateExchangeContainerResolver(ctx, qp, fault.New(true)) - require.NoError(t, err, "populating container resolver", category, clues.ToCore(err)) - - for destName, dest := range gen.dests { - p, err := path.FromDataLayerPath(dest.deets.Entries[0].RepoRef, true) - require.NoError(t, err, clues.ToCore(err)) - - id, ok := cr.LocationInCache(p.Folder(false)) - require.True(t, ok, "dir %s found in %s cache", p.Folder(false), category) - - d := dataset[category].dests[destName] - d.containerID = id - dataset[category].dests[destName] = d - } - } - - // container3 and containerRename don't exist yet. Those will get created - // later on during the tests. Putting their identifiers into the selector - // at this point is harmless. - containers := []string{container1, container2, container3, containerRename} - sel := selectors.NewExchangeBackup(owners) - sel.Include( - sel.MailFolders(containers, selectors.PrefixMatch()), - sel.ContactFolders(containers, selectors.PrefixMatch()), - ) - - bo, _, kw, ms, gc, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, ffs) + bo, acct, kw, ms, ss, gc, sels, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, toggles, version.Backup) defer closer() // run the initial backup runAndCheckBackup(t, ctx, &bo, mb, false) + rrPfx, err := path.ServicePrefix(acct.ID(), uidn.ID(), service, path.EmailCategory) + require.NoError(t, err, clues.ToCore(err)) + + // strip the category from the prefix; we primarily want the tenant and resource owner. 
+ expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String()) + bupDeets, _ := deeTD.GetDeetsInBackup(t, ctx, bo.Results.BackupID, acct.ID(), uidn.ID(), service, whatSet, ms, ss) + + // update the datasets with their location refs + for category, gen := range dataset { + for destName, cd := range gen.dests { + var longestLR string + + for _, ent := range bupDeets.Entries { + // generated destinations should always contain items + if ent.Folder != nil { + continue + } + + p, err := path.FromDataLayerPath(ent.RepoRef, false) + require.NoError(t, err, clues.ToCore(err)) + + // category must match, and the owning folder must be this destination + if p.Category() != category || !strings.HasSuffix(ent.LocationRef, destName) { + continue + } + + // emails, due to folder nesting and our design for populating data via restore, + // will duplicate the dest folder as both the restore destination, and the "old parent + // folder". we'll get both a prefix/destName and a prefix/destName/destName folder. + // since we want future comparison to only use the leaf dir, we select for the longest match. + if len(ent.LocationRef) > len(longestLR) { + longestLR = ent.LocationRef + } + } + + require.NotEmptyf(t, longestLR, "must find an expected details entry matching the generated folder: %s", destName) + + cd.locRef = longestLR + + dataset[category].dests[destName] = cd + expectDeets.AddLocation(category.String(), cd.locRef) + + for _, i := range dataset[category].dests[destName].itemRefs { + expectDeets.AddItem(category.String(), cd.locRef, i) + } + } + } + + // verify test data was populated, and track it for comparisons + // TODO: this can be swapped out for InDeets checks if we add itemRefs to folder ents. + for category, gen := range dataset { + qp := graph.QueryParams{ + Category: category, + ResourceOwner: uidn, + Credentials: m365, + } + + cr, err := exchange.PopulateExchangeContainerResolver(ctx, qp, fault.New(true)) + require.NoError(t, err, "populating container resolver", category, clues.ToCore(err)) + + for destName, dest := range gen.dests { + id, ok := cr.LocationInCache(dest.locRef) + require.True(t, ok, "dir %s found in %s cache", dest.locRef, category) + + dest.containerID = id + dataset[category].dests[destName] = dest + } + } + + // precheck to ensure the expectedDeets are correct. + // if we fail here, the expectedDeets were populated incorrectly. + deeTD.CheckBackupDetails(t, ctx, bo.Results.BackupID, whatSet, ms, ss, expectDeets, true) + // Although established as a table, these tests are not isolated from each other. // Assume that every test's side effects cascade to all following test cases. // The changes are split across the table so that we can monitor the deltas @@ -889,36 +953,49 @@ table := []struct { name string // performs the incremental update required for the test. 
- updateUserData func(t *testing.T) - itemsRead int - itemsWritten int + updateUserData func(t *testing.T) + deltaItemsRead int + deltaItemsWritten int + nonDeltaItemsRead int + nonDeltaItemsWritten int }{ { - name: "clean incremental, no changes", - updateUserData: func(t *testing.T) {}, - itemsRead: 0, - itemsWritten: 0, + name: "clean, no changes", + updateUserData: func(t *testing.T) {}, + deltaItemsRead: 0, + deltaItemsWritten: 0, + nonDeltaItemsRead: 8, + nonDeltaItemsWritten: 0, // unchanged items are not counted towards writes }, { name: "move an email folder to a subfolder", updateUserData: func(t *testing.T) { + cat := path.EmailCategory + // contacts and events cannot be subfoldered; this is an email-only change - toContainer := dataset[path.EmailCategory].dests[container1].containerID - fromContainer := dataset[path.EmailCategory].dests[container2].containerID + from := dataset[cat].dests[container2] + to := dataset[cat].dests[container1] body := users.NewItemMailFoldersItemMovePostRequestBody() - body.SetDestinationId(&toContainer) + body.SetDestinationId(ptr.To(to.containerID)) _, err := gc.Service. Client(). - UsersById(suite.user). - MailFoldersById(fromContainer). + Users(). + ByUserId(uidn.ID()). + MailFolders(). + ByMailFolderId(from.containerID). Move(). Post(ctx, body, nil) require.NoError(t, err, clues.ToCore(err)) + + newLoc := expectDeets.MoveLocation(cat.String(), from.locRef, to.locRef) + from.locRef = newLoc }, - itemsRead: 0, // zero because we don't count container reads - itemsWritten: 2, + deltaItemsRead: 0, // zero because we don't count container reads + deltaItemsWritten: 2, + nonDeltaItemsRead: 8, + nonDeltaItemsWritten: 2, }, { name: "delete a folder", @@ -928,19 +1005,23 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { switch category { case path.EmailCategory: - err := ac.Mail().DeleteContainer(ctx, suite.user, containerID) + err := ac.Mail().DeleteContainer(ctx, uidn.ID(), containerID) require.NoError(t, err, "deleting an email folder", clues.ToCore(err)) case path.ContactsCategory: - err := ac.Contacts().DeleteContainer(ctx, suite.user, containerID) + err := ac.Contacts().DeleteContainer(ctx, uidn.ID(), containerID) require.NoError(t, err, "deleting a contacts folder", clues.ToCore(err)) case path.EventsCategory: - err := ac.Events().DeleteContainer(ctx, suite.user, containerID) + err := ac.Events().DeleteContainer(ctx, uidn.ID(), containerID) require.NoError(t, err, "deleting a calendar", clues.ToCore(err)) } + + expectDeets.RemoveLocation(category.String(), d.dests[container2].locRef) } }, - itemsRead: 0, - itemsWritten: 0, // deletions are not counted as "writes" + deltaItemsRead: 0, + deltaItemsWritten: 0, // deletions are not counted as "writes" + nonDeltaItemsRead: 4, + nonDeltaItemsWritten: 0, }, { name: "add a new folder", @@ -950,55 +1031,75 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { t, ctx, gc, - path.ExchangeService, + service, acct, category, - selectors.NewExchangeRestore(owners).Selector, + selectors.NewExchangeRestore([]string{uidn.ID()}).Selector, m365.AzureTenantID, suite.user, "", container3, 2, version.Backup, gen.dbf) - ss := selectors.Selector{}.SetDiscreteOwnerIDName(suite.user, suite.user) - qp := graph.QueryParams{ Category: category, - ResourceOwner: ss, + ResourceOwner: uidn, Credentials: m365, } + + expectedLocRef := container3 + if category == path.EmailCategory { + expectedLocRef = path.Builder{}.Append(container3, container3).String() + } + cr, err := 
exchange.PopulateExchangeContainerResolver(ctx, qp, fault.New(true)) require.NoError(t, err, "populating container resolver", category, clues.ToCore(err)) - p, err := path.FromDataLayerPath(deets.Entries[0].RepoRef, true) - require.NoError(t, err, clues.ToCore(err)) + id, ok := cr.LocationInCache(expectedLocRef) + require.Truef(t, ok, "dir %s found in %s cache", expectedLocRef, category) - id, ok := cr.LocationInCache(p.Folder(false)) - require.Truef(t, ok, "dir %s found in %s cache", p.Folder(false), category) + dataset[category].dests[container3] = contDeets{ + containerID: id, + locRef: expectedLocRef, + itemRefs: nil, // not needed at this point + } - dataset[category].dests[container3] = contDeets{id, deets} + for _, ent := range deets.Entries { + if ent.Folder == nil { + expectDeets.AddItem(category.String(), expectedLocRef, ent.ItemRef) + } + } } }, - itemsRead: 4, - itemsWritten: 4, + deltaItemsRead: 4, + deltaItemsWritten: 4, + nonDeltaItemsRead: 8, + nonDeltaItemsWritten: 4, }, { name: "rename a folder", updateUserData: func(t *testing.T) { for category, d := range dataset { + cli := gc.Service.Client().Users().ByUserId(uidn.ID()) containerID := d.dests[container3].containerID - cli := gc.Service.Client().UsersById(suite.user) + newLoc := containerRename - // copy the container info, since both names should - // reference the same container by id. Though the - // details refs won't line up, so those get deleted. - d.dests[containerRename] = contDeets{ - containerID: d.dests[container3].containerID, - deets: nil, + if category == path.EmailCategory { + newLoc = path.Builder{}.Append(container3, containerRename).String() } + d.dests[containerRename] = contDeets{ + containerID: containerID, + locRef: newLoc, + } + + expectDeets.RenameLocation( + category.String(), + d.dests[container3].containerID, + newLoc) + switch category { case path.EmailCategory: - cmf := cli.MailFoldersById(containerID) + cmf := cli.MailFolders().ByMailFolderId(containerID) body, err := cmf.Get(ctx, nil) require.NoError(t, err, "getting mail folder", clues.ToCore(err)) @@ -1008,7 +1109,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { require.NoError(t, err, "updating mail folder name", clues.ToCore(err)) case path.ContactsCategory: - ccf := cli.ContactFoldersById(containerID) + ccf := cli.ContactFolders().ByContactFolderId(containerID) body, err := ccf.Get(ctx, nil) require.NoError(t, err, "getting contact folder", clues.ToCore(err)) @@ -1018,7 +1119,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { require.NoError(t, err, "updating contact folder name", clues.ToCore(err)) case path.EventsCategory: - cbi := cli.CalendarsById(containerID) + cbi := cli.Calendars().ByCalendarId(containerID) body, err := cbi.Get(ctx, nil) require.NoError(t, err, "getting calendar", clues.ToCore(err)) @@ -1029,119 +1130,160 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { } } }, - itemsRead: 0, // containers are not counted as reads - itemsWritten: 4, // two items per category + deltaItemsRead: 0, // containers are not counted as reads + // Renaming a folder doesn't cause kopia changes as the folder ID doesn't + // change. 
+ deltaItemsWritten: 0, // two items per category + nonDeltaItemsRead: 8, + nonDeltaItemsWritten: 0, }, { name: "add a new item", updateUserData: func(t *testing.T) { for category, d := range dataset { containerID := d.dests[container1].containerID - cli := gc.Service.Client().UsersById(suite.user) + cli := gc.Service.Client().Users().ByUserId(uidn.ID()) switch category { case path.EmailCategory: - _, itemData := generateItemData(t, category, suite.user, mailDBF) + _, itemData := generateItemData(t, category, uidn.ID(), mailDBF) body, err := support.CreateMessageFromBytes(itemData) require.NoError(t, err, "transforming mail bytes to messageable", clues.ToCore(err)) - _, err = cli.MailFoldersById(containerID).Messages().Post(ctx, body, nil) + itm, err := cli.MailFolders().ByMailFolderId(containerID).Messages().Post(ctx, body, nil) require.NoError(t, err, "posting email item", clues.ToCore(err)) + expectDeets.AddItem( + category.String(), + d.dests[category.String()].locRef, + ptr.Val(itm.GetId())) + case path.ContactsCategory: - _, itemData := generateItemData(t, category, suite.user, contactDBF) + _, itemData := generateItemData(t, category, uidn.ID(), contactDBF) body, err := support.CreateContactFromBytes(itemData) require.NoError(t, err, "transforming contact bytes to contactable", clues.ToCore(err)) - _, err = cli.ContactFoldersById(containerID).Contacts().Post(ctx, body, nil) + itm, err := cli.ContactFolders().ByContactFolderId(containerID).Contacts().Post(ctx, body, nil) require.NoError(t, err, "posting contact item", clues.ToCore(err)) + expectDeets.AddItem( + category.String(), + d.dests[category.String()].locRef, + ptr.Val(itm.GetId())) + case path.EventsCategory: - _, itemData := generateItemData(t, category, suite.user, eventDBF) + _, itemData := generateItemData(t, category, uidn.ID(), eventDBF) body, err := support.CreateEventFromBytes(itemData) require.NoError(t, err, "transforming event bytes to eventable", clues.ToCore(err)) - _, err = cli.CalendarsById(containerID).Events().Post(ctx, body, nil) + itm, err := cli.Calendars().ByCalendarId(containerID).Events().Post(ctx, body, nil) require.NoError(t, err, "posting events item", clues.ToCore(err)) + + expectDeets.AddItem( + category.String(), + d.dests[category.String()].locRef, + ptr.Val(itm.GetId())) } } }, - itemsRead: 2, - itemsWritten: 2, + deltaItemsRead: 2, + deltaItemsWritten: 2, + nonDeltaItemsRead: 10, + nonDeltaItemsWritten: 2, }, { name: "delete an existing item", updateUserData: func(t *testing.T) { for category, d := range dataset { containerID := d.dests[container1].containerID - cli := gc.Service.Client().UsersById(suite.user) + cli := gc.Service.Client().Users().ByUserId(uidn.ID()) switch category { case path.EmailCategory: - ids, _, _, err := ac.Mail().GetAddedAndRemovedItemIDs(ctx, suite.user, containerID, "", false) + ids, _, _, err := ac.Mail().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), containerID, "", false, true) require.NoError(t, err, "getting message ids", clues.ToCore(err)) require.NotEmpty(t, ids, "message ids in folder") - err = cli.MessagesById(ids[0]).Delete(ctx, nil) + err = cli.Messages().ByMessageId(ids[0]).Delete(ctx, nil) require.NoError(t, err, "deleting email item", clues.ToCore(err)) + expectDeets.RemoveItem( + category.String(), + d.dests[category.String()].locRef, + ids[0]) + case path.ContactsCategory: - ids, _, _, err := ac.Contacts().GetAddedAndRemovedItemIDs(ctx, suite.user, containerID, "", false) + ids, _, _, err := ac.Contacts().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), 
containerID, "", false, true) require.NoError(t, err, "getting contact ids", clues.ToCore(err)) require.NotEmpty(t, ids, "contact ids in folder") - err = cli.ContactsById(ids[0]).Delete(ctx, nil) + err = cli.Contacts().ByContactId(ids[0]).Delete(ctx, nil) require.NoError(t, err, "deleting contact item", clues.ToCore(err)) + expectDeets.RemoveItem( + category.String(), + d.dests[category.String()].locRef, + ids[0]) + case path.EventsCategory: - ids, _, _, err := ac.Events().GetAddedAndRemovedItemIDs(ctx, suite.user, containerID, "", false) + ids, _, _, err := ac.Events().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), containerID, "", false, true) require.NoError(t, err, "getting event ids", clues.ToCore(err)) require.NotEmpty(t, ids, "event ids in folder") - err = cli.CalendarsById(ids[0]).Delete(ctx, nil) + err = cli.Calendars().ByCalendarId(ids[0]).Delete(ctx, nil) require.NoError(t, err, "deleting calendar", clues.ToCore(err)) + + expectDeets.RemoveItem( + category.String(), + d.dests[category.String()].locRef, + ids[0]) } } }, - itemsRead: 2, - itemsWritten: 0, // deletes are not counted as "writes" + deltaItemsRead: 2, + deltaItemsWritten: 0, // deletes are not counted as "writes" + nonDeltaItemsRead: 8, + nonDeltaItemsWritten: 0, }, } + for _, test := range table { suite.Run(test.name, func() { var ( t = suite.T() incMB = evmock.NewBus() - incBO = newTestBackupOp(t, ctx, kw, ms, gc, acct, sel.Selector, incMB, ffs, closer) + incBO = newTestBackupOp(t, ctx, kw, ms, gc, acct, sels, incMB, toggles, closer) + atid = m365.AzureTenantID ) test.updateUserData(t) err := incBO.Run(ctx) require.NoError(t, err, clues.ToCore(err)) - checkBackupIsInManifests(t, ctx, kw, &incBO, sel.Selector, suite.user, maps.Keys(categories)...) - checkMetadataFilesExist( - t, - ctx, - incBO.Results.BackupID, - kw, - ms, - m365.AzureTenantID, - suite.user, - path.ExchangeService, - categories) + + bupID := incBO.Results.BackupID + + checkBackupIsInManifests(t, ctx, kw, &incBO, sels, uidn.ID(), maps.Keys(categories)...) + checkMetadataFilesExist(t, ctx, bupID, kw, ms, atid, uidn.ID(), service, categories) + deeTD.CheckBackupDetails(t, ctx, bupID, whatSet, ms, ss, expectDeets, true) // do some additional checks to ensure the incremental dealt with fewer items. // +4 on read/writes to account for metadata: 1 delta and 1 path for each type. 
- assert.Equal(t, test.itemsWritten+4, incBO.Results.ItemsWritten, "incremental items written") - assert.Equal(t, test.itemsRead+4, incBO.Results.ItemsRead, "incremental items read") + if !toggles.DisableDelta { + assert.Equal(t, test.deltaItemsRead+4, incBO.Results.ItemsRead, "incremental items read") + assert.Equal(t, test.deltaItemsWritten+4, incBO.Results.ItemsWritten, "incremental items written") + } else { + assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read") + assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written") + } + assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure())) assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors") assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events") assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events") assert.Equal(t, incMB.CalledWith[events.BackupStart][0][events.BackupID], - incBO.Results.BackupID, "incremental backupID pre-declaration") + bupID, "incremental backupID pre-declaration") }) } } @@ -1155,22 +1297,116 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDrive() { defer flush() var ( - t = suite.T() - mb = evmock.NewBus() - m365UserID = tester.SecondaryM365UserID(t) - sel = selectors.NewOneDriveBackup([]string{m365UserID}) + t = suite.T() + tenID = tester.M365TenantID(t) + mb = evmock.NewBus() + userID = tester.SecondaryM365UserID(t) + osel = selectors.NewOneDriveBackup([]string{userID}) + ws = deeTD.DriveIDFromRepoRef + svc = path.OneDriveService ) - sel.Include(sel.AllData()) + osel.Include(selTD.OneDriveBackupFolderScope(osel)) - bo, _, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}) + bo, _, _, ms, ss, _, sel, closer := prepNewTestBackupOp(t, ctx, mb, osel.Selector, control.Toggles{}, version.Backup) defer closer() runAndCheckBackup(t, ctx, &bo, mb, false) + + bID := bo.Results.BackupID + + _, expectDeets := deeTD.GetDeetsInBackup(t, ctx, bID, tenID, sel.ID(), svc, ws, ms, ss) + deeTD.CheckBackupDetails(t, ctx, bID, ws, ms, ss, expectDeets, false) } -// TestBackup_Run ensures that Integration Testing works for OneDrive -func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { +func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalOneDrive() { + sel := selectors.NewOneDriveRestore([]string{suite.user}) + + ic := func(cs []string) selectors.Selector { + sel.Include(sel.Folders(cs, selectors.PrefixMatch())) + return sel.Selector + } + + gtdi := func( + t *testing.T, + ctx context.Context, + gs graph.Servicer, + ) string { + d, err := api.GetUsersDrive(ctx, gs, suite.user) + if err != nil { + err = graph.Wrap(ctx, err, "retrieving default user drive"). 
+ With("user", suite.user) + } + + require.NoError(t, err, clues.ToCore(err)) + + id := ptr.Val(d.GetId()) + require.NotEmpty(t, id, "drive ID") + + return id + } + + runDriveIncrementalTest( + suite, + suite.user, + suite.user, + connector.Users, + path.OneDriveService, + path.FilesCategory, + ic, + gtdi, + false) +} + +func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalSharePoint() { + sel := selectors.NewSharePointRestore([]string{suite.site}) + + ic := func(cs []string) selectors.Selector { + sel.Include(sel.LibraryFolders(cs, selectors.PrefixMatch())) + return sel.Selector + } + + gtdi := func( + t *testing.T, + ctx context.Context, + gs graph.Servicer, + ) string { + d, err := api.GetSitesDefaultDrive(ctx, gs, suite.site) + if err != nil { + err = graph.Wrap(ctx, err, "retrieving default site drive"). + With("site", suite.site) + } + + require.NoError(t, err, clues.ToCore(err)) + + id := ptr.Val(d.GetId()) + require.NotEmpty(t, id, "drive ID") + + return id + } + + runDriveIncrementalTest( + suite, + suite.site, + suite.user, + connector.Sites, + path.SharePointService, + path.LibrariesCategory, + ic, + gtdi, + true) +} + +func runDriveIncrementalTest( + suite *BackupOpIntegrationSuite, + owner, permissionsUser string, + resource connector.Resource, + service path.ServiceType, + category path.CategoryType, + includeContainers func([]string) selectors.Selector, + getTestDriveID func(*testing.T, context.Context, graph.Servicer) string, + skipPermissionsTests bool, +) { ctx, flush := tester.NewContext() defer flush() @@ -1179,38 +1415,53 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { acct = tester.NewM365Account(t) ffs = control.Toggles{} mb = evmock.NewBus() + ws = deeTD.DriveIDFromRepoRef - // `now` has to be formatted with SimpleDateTimeOneDrive as - // some onedrive cannot have `:` in file/folder names - now = common.FormatNow(common.SimpleTimeTesting) - - owners = []string{suite.user} + // `now` has to be formatted with SimpleDateTimeTesting as + // some drives cannot have `:` in file/folder names + now = dttm.FormatNow(dttm.SafeForTesting) categories = map[path.CategoryType][]string{ - path.FilesCategory: {graph.DeltaURLsFileName, graph.PreviousPathFileName}, + category: {graph.DeltaURLsFileName, graph.PreviousPathFileName}, } - container1 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 1, now) - container2 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 2, now) - container3 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 3, now) + container1 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 1, now) + container2 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 2, now) + container3 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 3, now) + containerRename = "renamed_folder" genDests = []string{container1, container2} + + // container3 does not exist yet. It will get created later on + // during the tests. 
+ containers = []string{container1, container2, container3} ) + sel := includeContainers(containers) + creds, err := acct.M365Config() require.NoError(t, err, clues.ToCore(err)) - gc, err := connector.NewGraphConnector( - ctx, - acct, - connector.Users, - fault.New(true)) + gc, sel := GCWithSelector(t, ctx, acct, resource, sel, nil, nil) + + roidn := inMock.NewProvider(sel.ID(), sel.Name()) + + var ( + atid = creds.AzureTenantID + driveID = getTestDriveID(t, ctx, gc.Service) + fileDBF = func(id, timeStamp, subject, body string) []byte { + return []byte(id + subject) + } + makeLocRef = func(flds ...string) string { + elems := append([]string{driveID, "root:"}, flds...) + return path.Builder{}.Append(elems...).String() + } + ) + + rrPfx, err := path.ServicePrefix(atid, roidn.ID(), service, category) require.NoError(t, err, clues.ToCore(err)) - driveID := mustGetDefaultDriveID(t, ctx, gc.Service, suite.user) - - fileDBF := func(id, timeStamp, subject, body string) []byte { - return []byte(id + subject) - } + // strip the category from the prefix; we primarily want the tenant and resource owner. + expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String()) // Populate initial test data. // Generate 2 new folders with two items each. Only the first two @@ -1219,19 +1470,27 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { // through the changes. This should be enough to cover most delta // actions. for _, destName := range genDests { - generateContainerOfItems( + deets := generateContainerOfItems( t, ctx, gc, - path.OneDriveService, + service, acct, - path.FilesCategory, - selectors.NewOneDriveRestore(owners).Selector, - creds.AzureTenantID, suite.user, driveID, destName, + category, + sel, + atid, roidn.ID(), driveID, destName, 2, // Use an old backup version so we don't need metadata files. 0, fileDBF) + + for _, ent := range deets.Entries { + if ent.Folder != nil { + continue + } + + expectDeets.AddItem(driveID, makeLocRef(destName), ent.ItemRef) + } } containerIDs := map[string]string{} @@ -1240,38 +1499,35 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { for _, destName := range genDests { // Use path-based indexing to get the folder's ID. This is sourced from the // onedrive package `getFolder` function. - itemURL := fmt.Sprintf( - "https://graph.microsoft.com/v1.0/drives/%s/root:/%s", - driveID, - destName) - resp, err := drive.NewItemsDriveItemItemRequestBuilder(itemURL, gc.Service.Adapter()). + itemURL := fmt.Sprintf("https://graph.microsoft.com/v1.0/drives/%s/root:/%s", driveID, destName) + resp, err := drives. + NewItemItemsDriveItemItemRequestBuilder(itemURL, gc.Service.Adapter()). Get(ctx, nil) require.NoError(t, err, "getting drive folder ID", "folder name", destName, clues.ToCore(err)) containerIDs[destName] = ptr.Val(resp.GetId()) } - // container3 does not exist yet. It will get created later on - // during the tests. - containers := []string{container1, container2, container3} - sel := selectors.NewOneDriveBackup(owners) - sel.Include(sel.Folders(containers, selectors.PrefixMatch())) - - bo, _, kw, ms, gc, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, ffs) + bo, _, kw, ms, ss, gc, _, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs, version.Backup) defer closer() // run the initial backup runAndCheckBackup(t, ctx, &bo, mb, false) + // precheck to ensure the expectedDeets are correct. + // if we fail here, the expectedDeets were populated incorrectly. 
+ deeTD.CheckBackupDetails(t, ctx, bo.Results.BackupID, ws, ms, ss, expectDeets, true) + var ( newFile models.DriveItemable newFileName = "new_file.txt" + newFileID string permissionIDMappings = map[string]string{} - writePerm = onedrive.UserPermission{ + writePerm = metadata.Permission{ ID: "perm-id", Roles: []string{"write"}, - EntityID: suite.user, + EntityID: permissionsUser, } ) @@ -1282,19 +1538,19 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { table := []struct { name string // performs the incremental update required for the test. - updateUserData func(t *testing.T) - itemsRead int - itemsWritten int + updateFiles func(t *testing.T) + itemsRead int + itemsWritten int }{ { - name: "clean incremental, no changes", - updateUserData: func(t *testing.T) {}, - itemsRead: 0, - itemsWritten: 0, + name: "clean incremental, no changes", + updateFiles: func(t *testing.T) {}, + itemsRead: 0, + itemsWritten: 0, }, { name: "create a new file", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { targetContainer := containerIDs[container1] driveItem := models.NewDriveItem() driveItem.SetName(&newFileName) @@ -1305,14 +1561,18 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { driveID, targetContainer, driveItem) - require.NoError(t, err, "creating new file", clues.ToCore(err)) + require.NoErrorf(t, err, "creating new file %v", clues.ToCore(err)) + + newFileID = ptr.Val(newFile.GetId()) + + expectDeets.AddItem(driveID, makeLocRef(container1), newFileID) }, itemsRead: 1, // .data file for newitem itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent }, { name: "add permission to new file", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { driveItem := models.NewDriveItem() driveItem.SetName(&newFileName) driveItem.SetFile(models.NewFile()) @@ -1322,18 +1582,19 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { gc.Service, driveID, *newFile.GetId(), - []onedrive.UserPermission{writePerm}, - []onedrive.UserPermission{}, + []metadata.Permission{writePerm}, + []metadata.Permission{}, permissionIDMappings, ) - require.NoErrorf(t, err, "add permission to file %v", clues.ToCore(err)) + require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err)) + // no expectedDeets: metadata isn't tracked }, itemsRead: 1, // .data file for newitem itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated) }, { name: "remove permission from new file", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { driveItem := models.NewDriveItem() driveItem.SetName(&newFileName) driveItem.SetFile(models.NewFile()) @@ -1343,18 +1604,18 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { gc.Service, driveID, *newFile.GetId(), - []onedrive.UserPermission{}, - []onedrive.UserPermission{writePerm}, - permissionIDMappings, - ) - require.NoError(t, err, "add permission to file", clues.ToCore(err)) + []metadata.Permission{}, + []metadata.Permission{writePerm}, + permissionIDMappings) + require.NoErrorf(t, err, "removing permission from file %v", clues.ToCore(err)) + // no expectedDeets: metadata isn't tracked }, itemsRead: 1, // .data file for newitem itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated) }, { name: "add permission to container", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { 
targetContainer := containerIDs[container1] driveItem := models.NewDriveItem() driveItem.SetName(&newFileName) @@ -1365,18 +1626,18 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { gc.Service, driveID, targetContainer, - []onedrive.UserPermission{writePerm}, - []onedrive.UserPermission{}, - permissionIDMappings, - ) - require.NoError(t, err, "add permission to file", clues.ToCore(err)) + []metadata.Permission{writePerm}, + []metadata.Permission{}, + permissionIDMappings) + require.NoErrorf(t, err, "adding permission to container %v", clues.ToCore(err)) + // no expectedDeets: metadata isn't tracked }, itemsRead: 0, itemsWritten: 1, // .dirmeta for collection }, { name: "remove permission from container", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { targetContainer := containerIDs[container1] driveItem := models.NewDriveItem() driveItem.SetName(&newFileName) @@ -1387,32 +1648,35 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { gc.Service, driveID, targetContainer, - []onedrive.UserPermission{}, - []onedrive.UserPermission{writePerm}, - permissionIDMappings, - ) - require.NoError(t, err, "add permission to file", clues.ToCore(err)) + []metadata.Permission{}, + []metadata.Permission{writePerm}, + permissionIDMappings) + require.NoErrorf(t, err, "removing permission from container %v", clues.ToCore(err)) + // no expectedDeets: metadata isn't tracked }, itemsRead: 0, itemsWritten: 1, // .dirmeta for collection }, { name: "update contents of a file", - updateUserData: func(t *testing.T) { - err := gc.Service. + updateFiles: func(t *testing.T) { + _, err := gc.Service. Client(). - DrivesById(driveID). - ItemsById(ptr.Val(newFile.GetId())). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(ptr.Val(newFile.GetId())). Content(). Put(ctx, []byte("new content"), nil) - require.NoError(t, err, "updating file content") + require.NoErrorf(t, err, "updating file contents: %v", clues.ToCore(err)) + // no expectedDeets: neither file id nor location changed }, itemsRead: 1, // .data file for newitem itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent }, { name: "rename a file", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { container := containerIDs[container1] driveItem := models.NewDriveItem() @@ -1424,18 +1688,21 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { _, err := gc.Service. Client(). - DrivesById(driveID). - ItemsById(ptr.Val(newFile.GetId())). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(ptr.Val(newFile.GetId())). Patch(ctx, driveItem, nil) - require.NoError(t, err, "renaming file", clues.ToCore(err)) + require.NoError(t, err, "renaming file %v", clues.ToCore(err)) }, itemsRead: 1, // .data file for newitem itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent + // no expectedDeets: neither file id nor location changed }, { name: "move a file between folders", - updateUserData: func(t *testing.T) { - dest := containerIDs[container1] + updateFiles: func(t *testing.T) { + dest := containerIDs[container2] driveItem := models.NewDriveItem() driveItem.SetName(&newFileName) @@ -1445,102 +1712,133 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { _, err := gc.Service. Client(). - DrivesById(driveID). - ItemsById(ptr.Val(newFile.GetId())). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(ptr.Val(newFile.GetId())). 
Patch(ctx, driveItem, nil) - require.NoError(t, err, "moving file between folders", clues.ToCore(err)) + require.NoErrorf(t, err, "moving file between folders %v", clues.ToCore(err)) + + expectDeets.MoveItem( + driveID, + makeLocRef(container1), + makeLocRef(container2), + ptr.Val(newFile.GetId())) }, itemsRead: 1, // .data file for newitem itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent }, { name: "delete file", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { // deletes require unique http clients // https://github.com/alcionai/corso/issues/2707 err = newDeleteServicer(t). Client(). - DrivesById(driveID). - ItemsById(ptr.Val(newFile.GetId())). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(ptr.Val(newFile.GetId())). Delete(ctx, nil) - require.NoError(t, err, "deleting file", clues.ToCore(err)) + require.NoErrorf(t, err, "deleting file %v", clues.ToCore(err)) + + expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId())) }, itemsRead: 0, itemsWritten: 0, }, { name: "move a folder to a subfolder", - updateUserData: func(t *testing.T) { - dest := containerIDs[container1] - source := containerIDs[container2] - - driveItem := models.NewDriveItem() - driveItem.SetName(&container2) - parentRef := models.NewItemReference() - parentRef.SetId(&dest) - driveItem.SetParentReference(parentRef) - - _, err := gc.Service. - Client(). - DrivesById(driveID). - ItemsById(source). - Patch(ctx, driveItem, nil) - require.NoError(t, err, "moving folder", clues.ToCore(err)) - }, - itemsRead: 0, - itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target) - }, - { - name: "rename a folder", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { parent := containerIDs[container1] child := containerIDs[container2] driveItem := models.NewDriveItem() - name := "renamed_folder" - driveItem.SetName(&name) + driveItem.SetName(&container2) parentRef := models.NewItemReference() parentRef.SetId(&parent) driveItem.SetParentReference(parentRef) _, err := gc.Service. Client(). - DrivesById(driveID). - ItemsById(child). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(child). + Patch(ctx, driveItem, nil) + require.NoError(t, err, "moving folder", clues.ToCore(err)) + + expectDeets.MoveLocation( + driveID, + makeLocRef(container2), + makeLocRef(container1)) + }, + itemsRead: 0, + itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target) + }, + { + name: "rename a folder", + updateFiles: func(t *testing.T) { + parent := containerIDs[container1] + child := containerIDs[container2] + + driveItem := models.NewDriveItem() + driveItem.SetName(&containerRename) + parentRef := models.NewItemReference() + parentRef.SetId(&parent) + driveItem.SetParentReference(parentRef) + + _, err := gc.Service. + Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(child). 
Patch(ctx, driveItem, nil) require.NoError(t, err, "renaming folder", clues.ToCore(err)) + + containerIDs[containerRename] = containerIDs[container2] + + expectDeets.RenameLocation( + driveID, + makeLocRef(container1, container2), + makeLocRef(container1, containerRename)) }, itemsRead: 0, itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target) }, { name: "delete a folder", - updateUserData: func(t *testing.T) { - container := containerIDs[container2] + updateFiles: func(t *testing.T) { + container := containerIDs[containerRename] // deletes require unique http clients // https://github.com/alcionai/corso/issues/2707 err = newDeleteServicer(t). Client(). - DrivesById(driveID). - ItemsById(container). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(container). Delete(ctx, nil) require.NoError(t, err, "deleting folder", clues.ToCore(err)) + + expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename)) }, itemsRead: 0, itemsWritten: 0, }, { name: "add a new folder", - updateUserData: func(t *testing.T) { + updateFiles: func(t *testing.T) { generateContainerOfItems( t, ctx, gc, - path.OneDriveService, + service, acct, - path.FilesCategory, - selectors.NewOneDriveRestore(owners).Selector, - creds.AzureTenantID, suite.user, driveID, container3, + category, + sel, + atid, roidn.ID(), driveID, container3, 2, 0, fileDBF) @@ -1550,11 +1848,13 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { "https://graph.microsoft.com/v1.0/drives/%s/root:/%s", driveID, container3) - resp, err := drive.NewItemsDriveItemItemRequestBuilder(itemURL, gc.Service.Adapter()). + resp, err := drives.NewItemItemsDriveItemItemRequestBuilder(itemURL, gc.Service.Adapter()). Get(ctx, nil) require.NoError(t, err, "getting drive folder ID", "folder name", container3, clues.ToCore(err)) containerIDs[container3] = ptr.Val(resp.GetId()) + + expectDeets.AddLocation(driveID, container3) }, itemsRead: 2, // 2 .data for 2 files itemsWritten: 6, // read items + 2 directory meta @@ -1562,45 +1862,176 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() { } for _, test := range table { suite.Run(test.name, func() { + cleanGC, err := connector.NewGraphConnector(ctx, acct, resource) + require.NoError(t, err, clues.ToCore(err)) + var ( t = suite.T() incMB = evmock.NewBus() - incBO = newTestBackupOp(t, ctx, kw, ms, gc, acct, sel.Selector, incMB, ffs, closer) + incBO = newTestBackupOp(t, ctx, kw, ms, cleanGC, acct, sel, incMB, ffs, closer) ) tester.LogTimeOfTest(suite.T()) - test.updateUserData(t) + test.updateFiles(t) - err := incBO.Run(ctx) + err = incBO.Run(ctx) require.NoError(t, err, clues.ToCore(err)) - checkBackupIsInManifests(t, ctx, kw, &incBO, sel.Selector, suite.user, maps.Keys(categories)...) - checkMetadataFilesExist( - t, - ctx, - incBO.Results.BackupID, - kw, - ms, - creds.AzureTenantID, - suite.user, - path.OneDriveService, - categories) + + bupID := incBO.Results.BackupID + + checkBackupIsInManifests(t, ctx, kw, &incBO, sel, roidn.ID(), maps.Keys(categories)...) + checkMetadataFilesExist(t, ctx, bupID, kw, ms, atid, roidn.ID(), service, categories) + deeTD.CheckBackupDetails(t, ctx, bupID, ws, ms, ss, expectDeets, true) // do some additional checks to ensure the incremental dealt with fewer items. // +2 on read/writes to account for metadata: 1 delta and 1 path. 
- assert.Equal(t, test.itemsWritten+2, incBO.Results.ItemsWritten, "incremental items written") - assert.Equal(t, test.itemsRead+2, incBO.Results.ItemsRead, "incremental items read") + var ( + expectWrites = test.itemsWritten + 2 + expectReads = test.itemsRead + 2 + assertReadWrite = assert.Equal + ) + + // Sharepoint can produce a superset of permissions by nature of + // its drive type. Since this counter comparison is a bit hacky + // to begin with, it's easiest to assert a <= comparison instead + // of fine tuning each test case. + if service == path.SharePointService { + assertReadWrite = assert.LessOrEqual + } + + assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written") + assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read") + assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure())) assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors") assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events") assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events") assert.Equal(t, incMB.CalledWith[events.BackupStart][0][events.BackupID], - incBO.Results.BackupID, "incremental backupID pre-declaration") + bupID, "incremental backupID pre-declaration") }) } } +func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() { + ctx, flush := tester.NewContext() + defer flush() + + var ( + t = suite.T() + acct = tester.NewM365Account(t) + ffs = control.Toggles{} + mb = evmock.NewBus() + + categories = map[path.CategoryType][]string{ + path.FilesCategory: {graph.DeltaURLsFileName, graph.PreviousPathFileName}, + } + ) + + creds, err := acct.M365Config() + require.NoError(t, err, clues.ToCore(err)) + + gc, err := connector.NewGraphConnector( + ctx, + acct, + connector.Users) + require.NoError(t, err, clues.ToCore(err)) + + userable, err := gc.Discovery.Users().GetByID(ctx, suite.user) + require.NoError(t, err, clues.ToCore(err)) + + uid := ptr.Val(userable.GetId()) + uname := ptr.Val(userable.GetUserPrincipalName()) + + oldsel := selectors.NewOneDriveBackup([]string{uname}) + oldsel.Include(selTD.OneDriveBackupFolderScope(oldsel)) + + bo, _, kw, ms, _, gc, sel, closer := prepNewTestBackupOp(t, ctx, mb, oldsel.Selector, ffs, 0) + defer closer() + + // ensure the initial owner uses name in both cases + bo.ResourceOwner = sel.SetDiscreteOwnerIDName(uname, uname) + // required, otherwise we don't run the migration + bo.backupVersion = version.All8MigrateUserPNToID - 1 + + require.Equalf( + t, + bo.ResourceOwner.Name(), + bo.ResourceOwner.ID(), + "historical representation of user id [%s] should match pn [%s]", + bo.ResourceOwner.ID(), + bo.ResourceOwner.Name()) + + // run the initial backup + runAndCheckBackup(t, ctx, &bo, mb, false) + + newsel := selectors.NewOneDriveBackup([]string{uid}) + newsel.Include(selTD.OneDriveBackupFolderScope(newsel)) + sel = newsel.SetDiscreteOwnerIDName(uid, uname) + + var ( + incMB = evmock.NewBus() + // the incremental backup op should have a proper user ID for the id. 
+ incBO = newTestBackupOp(t, ctx, kw, ms, gc, acct, sel, incMB, ffs, closer) + ) + + require.NotEqualf( + t, + incBO.ResourceOwner.Name(), + incBO.ResourceOwner.ID(), + "current representation of user: id [%s] should differ from PN [%s]", + incBO.ResourceOwner.ID(), + incBO.ResourceOwner.Name()) + + err = incBO.Run(ctx) + require.NoError(t, err, clues.ToCore(err)) + checkBackupIsInManifests(t, ctx, kw, &incBO, sel, uid, maps.Keys(categories)...) + checkMetadataFilesExist( + t, + ctx, + incBO.Results.BackupID, + kw, + ms, + creds.AzureTenantID, + uid, + path.OneDriveService, + categories) + + // 2 on read/writes to account for metadata: 1 delta and 1 path. + assert.LessOrEqual(t, 2, incBO.Results.ItemsWritten, "items written") + assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read") + assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure())) + assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors") + assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "backup-start events") + assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "backup-end events") + assert.Equal(t, + incMB.CalledWith[events.BackupStart][0][events.BackupID], + incBO.Results.BackupID, "backupID pre-declaration") + + bid := incBO.Results.BackupID + bup := &backup.Backup{} + + err = ms.Get(ctx, model.BackupSchema, bid, bup) + require.NoError(t, err, clues.ToCore(err)) + + var ( + ssid = bup.StreamStoreID + deets details.Details + ss = streamstore.NewStreamer(kw, creds.AzureTenantID, path.OneDriveService) + ) + + err = ss.Read(ctx, ssid, streamstore.DetailsReader(details.UnmarshalTo(&deets)), fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + + for _, ent := range deets.Entries { + // 46 is the tenant uuid + "onedrive" + two slashes + if len(ent.RepoRef) > 46 { + assert.Contains(t, ent.RepoRef, uid) + } + } +} + // --------------------------------------------------------------------------- // SharePoint // --------------------------------------------------------------------------- @@ -1615,13 +2046,13 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_sharePoint() { sel = selectors.NewSharePointBackup([]string{suite.site}) ) - sel.Include(testdata.SharePointBackupFolderScope(sel)) + sel.Include(selTD.SharePointBackupFolderScope(sel)) - bo, _, kw, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}) + bo, _, kw, _, _, _, sels, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}, version.Backup) defer closer() runAndCheckBackup(t, ctx, &bo, mb, false) - checkBackupIsInManifests(t, ctx, kw, &bo, sel.Selector, suite.site, path.LibrariesCategory) + checkBackupIsInManifests(t, ctx, kw, &bo, sels, suite.site, path.LibrariesCategory) } // --------------------------------------------------------------------------- diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index 3e9e36805..608f6a20a 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -14,7 +14,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/connector/mock" + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" "github.com/alcionai/corso/src/internal/data" evmock "github.com/alcionai/corso/src/internal/events/mock" "github.com/alcionai/corso/src/internal/kopia" @@ -45,16 +47,28 
@@ type mockRestoreProducer struct { onRestore restoreFunc } -type restoreFunc func(id string, ps []path.Path) ([]data.RestoreCollection, error) +type restoreFunc func( + id string, + ps []path.RestorePaths, +) ([]data.RestoreCollection, error) func (mr *mockRestoreProducer) buildRestoreFunc( t *testing.T, oid string, ops []path.Path, ) { - mr.onRestore = func(id string, ps []path.Path) ([]data.RestoreCollection, error) { + mr.onRestore = func( + id string, + ps []path.RestorePaths, + ) ([]data.RestoreCollection, error) { + gotPaths := make([]path.Path, 0, len(ps)) + + for _, rp := range ps { + gotPaths = append(gotPaths, rp.StoragePath) + } + assert.Equal(t, oid, id, "manifest id") - checkPaths(t, ops, ps) + checkPaths(t, ops, gotPaths) return mr.colls, mr.err } @@ -63,11 +77,13 @@ func (mr *mockRestoreProducer) buildRestoreFunc( func (mr *mockRestoreProducer) ProduceRestoreCollections( ctx context.Context, snapshotID string, - paths []path.Path, + paths []path.RestorePaths, bc kopia.ByteCounter, errs *fault.Bus, ) ([]data.RestoreCollection, error) { - mr.gotPaths = append(mr.gotPaths, paths...) + for _, ps := range paths { + mr.gotPaths = append(mr.gotPaths, ps.StoragePath) + } if mr.onRestore != nil { return mr.onRestore(snapshotID, paths) @@ -98,7 +114,7 @@ func (mbu mockBackupConsumer) ConsumeBackupCollections( ctx context.Context, bases []kopia.IncrementalBase, cs []data.BackupCollection, - excluded map[string]map[string]struct{}, + excluded prefixmatcher.StringSetReader, tags map[string]string, buildTreeWithBase bool, errs *fault.Bus, @@ -271,10 +287,10 @@ func makeFolderEntry( size int64, modTime time.Time, dt details.ItemType, -) *details.DetailsEntry { +) *details.Entry { t.Helper() - return &details.DetailsEntry{ + return &details.Entry{ RepoRef: pb.String(), ShortRef: pb.ShortRef(), ParentRef: pb.Dir().ShortRef(), @@ -308,7 +324,7 @@ func makeDetailsEntry( l *path.Builder, size int, updated bool, -) *details.DetailsEntry { +) *details.Entry { t.Helper() var lr string @@ -316,7 +332,7 @@ func makeDetailsEntry( lr = l.String() } - res := &details.DetailsEntry{ + res := &details.Entry{ RepoRef: p.String(), ShortRef: p.ShortRef(), ParentRef: p.ToBuilder().Dir().ShortRef(), @@ -451,7 +467,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_PersistResults() { op, err := NewBackupOperation( ctx, - control.Options{}, + control.Defaults(), kw, sw, gc, @@ -642,15 +658,15 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems path.OneDriveService.String(), ro, path.FilesCategory.String(), - "drives", + odConsts.DrivesPathDir, "drive-id", - "root:", + odConsts.RootPathDir, "work", "item1", }, true, ) - locationPath1 = path.Builder{}.Append("root:", "work-display-name") + locationPath1 = path.Builder{}.Append(odConsts.RootPathDir, "work-display-name") itemPath2 = makePath( suite.T(), []string{ @@ -658,15 +674,15 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems path.OneDriveService.String(), ro, path.FilesCategory.String(), - "drives", + odConsts.DrivesPathDir, "drive-id", - "root:", + odConsts.RootPathDir, "personal", "item2", }, true, ) - locationPath2 = path.Builder{}.Append("root:", "personal-display-name") + locationPath2 = path.Builder{}.Append(odConsts.RootPathDir, "personal-display-name") itemPath3 = makePath( suite.T(), []string{ @@ -718,20 +734,20 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems mdm *mockDetailsMergeInfoer errCheck assert.ErrorAssertionFunc - expectedEntries 
[]*details.DetailsEntry + expectedEntries []*details.Entry }{ { name: "NilShortRefsFromPrevBackup", errCheck: assert.NoError, // Use empty slice so we don't error out on nil != empty. - expectedEntries: []*details.DetailsEntry{}, + expectedEntries: []*details.Entry{}, }, { name: "EmptyShortRefsFromPrevBackup", mdm: newMockDetailsMergeInfoer(), errCheck: assert.NoError, // Use empty slice so we don't error out on nil != empty. - expectedEntries: []*details.DetailsEntry{}, + expectedEntries: []*details.Entry{}, }, { name: "BackupIDNotFound", @@ -800,7 +816,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, @@ -836,7 +852,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, @@ -866,7 +882,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ { RepoRef: stdpath.Join( append( @@ -928,7 +944,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, @@ -958,14 +974,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, }, }, errCheck: assert.NoError, - expectedEntries: []*details.DetailsEntry{ + expectedEntries: []*details.Entry{ makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, @@ -991,14 +1007,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, }, }, errCheck: assert.NoError, - expectedEntries: []*details.DetailsEntry{ + expectedEntries: []*details.Entry{ makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, @@ -1024,7 +1040,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), *makeDetailsEntry(suite.T(), itemPath2, locationPath2, 84, false), }, @@ -1032,7 +1048,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems }, }, errCheck: assert.NoError, - expectedEntries: []*details.DetailsEntry{ + expectedEntries: []*details.Entry{ 
makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, @@ -1058,14 +1074,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, }, }, errCheck: assert.NoError, - expectedEntries: []*details.DetailsEntry{ + expectedEntries: []*details.Entry{ makeDetailsEntry(suite.T(), itemPath2, locationPath2, 42, true), }, }, @@ -1099,14 +1115,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, }, backup2.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ // This entry should not be picked due to a mismatch on Reasons. *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 84, false), // This item should be picked. @@ -1116,7 +1132,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems }, }, errCheck: assert.NoError, - expectedEntries: []*details.DetailsEntry{ + expectedEntries: []*details.Entry{ makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), makeDetailsEntry(suite.T(), itemPath3, locationPath3, 37, false), }, @@ -1150,14 +1166,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems populatedDetails: map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, }, backup2.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ // This entry should not be picked due to being incomplete. *makeDetailsEntry(suite.T(), itemPath1, locationPath1, 84, false), }, @@ -1165,7 +1181,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems }, }, errCheck: assert.NoError, - expectedEntries: []*details.DetailsEntry{ + expectedEntries: []*details.Entry{ makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, @@ -1265,12 +1281,12 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde populatedDetails := map[string]*details.Details{ backup1.DetailsID: { DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{*itemDetails}, + Entries: []details.Entry{*itemDetails}, }, }, } - expectedEntries := []details.DetailsEntry{*itemDetails} + expectedEntries := []details.Entry{*itemDetails} // update the details itemDetails.Exchange.Modified = now @@ -1312,7 +1328,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde // assert.elementsMatch to fail. 
func compareDeetEntries( t *testing.T, - expect, result []details.DetailsEntry, + expect, result []details.Entry, ) { if !assert.Equal(t, len(expect), len(result), "entry slices should be equal len") { require.ElementsMatch(t, expect, result) @@ -1321,8 +1337,8 @@ func compareDeetEntries( var ( // repoRef -> modified time eMods = map[string]time.Time{} - es = make([]details.DetailsEntry, 0, len(expect)) - rs = make([]details.DetailsEntry, 0, len(expect)) + es = make([]details.Entry, 0, len(expect)) + rs = make([]details.Entry, 0, len(expect)) ) for _, e := range expect { @@ -1347,7 +1363,7 @@ func compareDeetEntries( assert.ElementsMatch(t, es, rs) } -func withoutModified(de details.DetailsEntry) details.DetailsEntry { +func withoutModified(de details.Entry) details.Entry { switch { case de.Exchange != nil: de.Exchange.Modified = time.Time{} diff --git a/src/internal/operations/common.go b/src/internal/operations/common.go index feec1e8d7..70c53d2cb 100644 --- a/src/internal/operations/common.go +++ b/src/internal/operations/common.go @@ -13,6 +13,19 @@ import ( "github.com/alcionai/corso/src/pkg/store" ) +func getBackupFromID( + ctx context.Context, + backupID model.StableID, + ms *store.Wrapper, +) (*backup.Backup, error) { + bup, err := ms.GetBackup(ctx, backupID) + if err != nil { + return nil, clues.Wrap(err, "getting backup") + } + + return bup, nil +} + func getBackupAndDetailsFromID( ctx context.Context, backupID model.StableID, @@ -22,7 +35,7 @@ func getBackupAndDetailsFromID( ) (*backup.Backup, *details.Details, error) { bup, err := ms.GetBackup(ctx, backupID) if err != nil { - return nil, nil, clues.Wrap(err, "getting backup details ID") + return nil, nil, clues.Wrap(err, "getting backup") } var ( diff --git a/src/internal/operations/help_test.go b/src/internal/operations/help_test.go new file mode 100644 index 000000000..f5b01dc9b --- /dev/null +++ b/src/internal/operations/help_test.go @@ -0,0 +1,49 @@ +package operations + +import ( + "context" + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/connector" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/selectors" +) + +// A QoL builder for live GC instances that updates +// the selector's owner id and name in the process +// to help avoid gotchas. 
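//
// Illustrative call (editor's sketch, mirroring the setupExchangeBackup helper
// in restore_test.go further down this diff; t, ctx, acct, and owner are
// hypothetical test fixtures):
//
//	esel := selectors.NewExchangeBackup([]string{owner})
//	gc, sel := GCWithSelector(t, ctx, acct, connector.Users, esel.Selector, nil, nil)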
+func GCWithSelector( + t *testing.T, + ctx context.Context, //revive:disable-line:context-as-argument + acct account.Account, + cr connector.Resource, + sel selectors.Selector, + ins idname.Cacher, + onFail func(), +) (*connector.GraphConnector, selectors.Selector) { + gc, err := connector.NewGraphConnector(ctx, acct, cr) + if !assert.NoError(t, err, clues.ToCore(err)) { + if onFail != nil { + onFail() + } + + t.FailNow() + } + + id, name, err := gc.PopulateOwnerIDAndNamesFrom(ctx, sel.DiscreteOwner, ins) + if !assert.NoError(t, err, clues.ToCore(err)) { + if onFail != nil { + onFail() + } + + t.FailNow() + } + + sel = sel.SetDiscreteOwnerIDName(id, name) + + return gc, sel +} diff --git a/src/internal/operations/helpers.go b/src/internal/operations/helpers.go index 1d6cef406..0c5c9c049 100644 --- a/src/internal/operations/helpers.go +++ b/src/internal/operations/helpers.go @@ -59,18 +59,18 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) { } if fe.Failure != nil { - log.With("error", fe.Failure).Error(pfxMsg + " primary failure") + log.With("error", fe.Failure).Errorf("%s primary failure: %s", pfxMsg, fe.Failure.Msg) } for i, item := range fe.Items { - log.With("failed_item", item).Errorf("%s item failure %d of %d", pfxMsg, i+1, li) + log.With("failed_item", item).Errorf("%s item failure %d of %d: %s", pfxMsg, i+1, li, item.Cause) } for i, item := range fe.Skipped { - log.With("skipped_item", item).Errorf("%s skipped item %d of %d", pfxMsg, i+1, ls) + log.With("skipped_item", item).Errorf("%s skipped item %d of %d: %s", pfxMsg, i+1, ls, item.Item.Cause) } for i, err := range fe.Recovered { - log.With("recovered_error", err).Errorf("%s recoverable error %d of %d", pfxMsg, i+1, lr) + log.With("recovered_error", err).Errorf("%s recoverable error %d of %d: %s", pfxMsg, i+1, lr, err.Msg) } } diff --git a/src/internal/operations/inject/inject.go b/src/internal/operations/inject/inject.go index 8d92f3c80..55c472f7c 100644 --- a/src/internal/operations/inject/inject.go +++ b/src/internal/operations/inject/inject.go @@ -3,12 +3,14 @@ package inject import ( "context" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" @@ -18,12 +20,13 @@ type ( BackupProducer interface { ProduceBackupCollections( ctx context.Context, - resourceOwner common.IDNamer, + resourceOwner idname.Provider, sels selectors.Selector, metadata []data.RestoreCollection, + lastBackupVersion int, ctrlOpts control.Options, errs *fault.Bus, - ) ([]data.BackupCollection, map[string]map[string]struct{}, error) + ) ([]data.BackupCollection, prefixmatcher.StringSetReader, error) Wait() *data.CollectionStats } @@ -33,7 +36,7 @@ type ( ctx context.Context, bases []kopia.IncrementalBase, cs []data.BackupCollection, - excluded map[string]map[string]struct{}, + pmr prefixmatcher.StringSetReader, tags map[string]string, buildTreeWithBase bool, errs *fault.Bus, @@ -44,7 +47,7 @@ type ( ProduceRestoreCollections( ctx context.Context, snapshotID string, - paths []path.Path, + paths 
[]path.RestorePaths, bc kopia.ByteCounter, errs *fault.Bus, ) ([]data.RestoreCollection, error) @@ -64,4 +67,8 @@ type ( Wait() *data.CollectionStats } + + RepoMaintenancer interface { + RepoMaintenance(ctx context.Context, opts repository.Maintenance) error + } ) diff --git a/src/internal/operations/maintenance.go b/src/internal/operations/maintenance.go new file mode 100644 index 000000000..9233cc0b2 --- /dev/null +++ b/src/internal/operations/maintenance.go @@ -0,0 +1,94 @@ +package operations + +import ( + "context" + "time" + + "github.com/alcionai/clues" + + "github.com/alcionai/corso/src/internal/common/crash" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/internal/events" + "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/stats" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/control/repository" +) + +// MaintenanceOperation wraps an operation with restore-specific props. +type MaintenanceOperation struct { + operation + Results MaintenanceResults + mOpts repository.Maintenance +} + +// MaintenanceResults aggregate the details of the results of the operation. +type MaintenanceResults struct { + stats.StartAndEndTime +} + +// NewMaintenanceOperation constructs and validates a maintenance operation. +func NewMaintenanceOperation( + ctx context.Context, + opts control.Options, + kw *kopia.Wrapper, + mOpts repository.Maintenance, + bus events.Eventer, +) (MaintenanceOperation, error) { + op := MaintenanceOperation{ + operation: newOperation(opts, bus, kw, nil), + mOpts: mOpts, + } + + // Don't run validation because we don't populate the model store. + + return op, nil +} + +func (op *MaintenanceOperation) Run(ctx context.Context) (err error) { + defer func() { + if crErr := crash.Recovery(ctx, recover(), "maintenance"); crErr != nil { + err = crErr + } + }() + + op.Results.StartedAt = time.Now() + + op.bus.Event( + ctx, + events.MaintenanceStart, + map[string]any{ + events.StartTime: op.Results.StartedAt, + }) + + defer func() { + op.bus.Event( + ctx, + events.MaintenanceEnd, + map[string]any{ + events.StartTime: op.Results.StartedAt, + events.Duration: op.Results.CompletedAt.Sub(op.Results.StartedAt), + events.EndTime: dttm.Format(op.Results.CompletedAt), + events.Status: op.Status.String(), + events.Resources: op.mOpts.Type.String(), + }) + }() + + return op.do(ctx) +} + +func (op *MaintenanceOperation) do(ctx context.Context) error { + defer func() { + op.Results.CompletedAt = time.Now() + }() + + err := op.operation.kopia.RepoMaintenance(ctx, op.mOpts) + if err != nil { + op.Status = Failed + return clues.Wrap(err, "running maintenance operation") + } + + op.Status = Completed + + return nil +} diff --git a/src/internal/operations/maintenance_test.go b/src/internal/operations/maintenance_test.go new file mode 100644 index 000000000..99791a17b --- /dev/null +++ b/src/internal/operations/maintenance_test.go @@ -0,0 +1,65 @@ +package operations + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + evmock "github.com/alcionai/corso/src/internal/events/mock" + "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/control/repository" +) + +type MaintenanceOpIntegrationSuite struct { + tester.Suite +} + +func 
TestMaintenanceOpIntegrationSuite(t *testing.T) { + suite.Run(t, &MaintenanceOpIntegrationSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}), + }) +} + +func (suite *MaintenanceOpIntegrationSuite) TestRepoMaintenance() { + var ( + t = suite.T() + // need to initialize the repository before we can test connecting to it. + st = tester.NewPrefixedS3Storage(t) + k = kopia.NewConn(st) + ) + + ctx, flush := tester.NewContext() + defer flush() + + err := k.Initialize(ctx, repository.Options{}) + require.NoError(t, err, clues.ToCore(err)) + + kw, err := kopia.NewWrapper(k) + // kopiaRef comes with a count of 1 and Wrapper bumps it again so safe + // to close here. + k.Close(ctx) + + require.NoError(t, err, clues.ToCore(err)) + + defer kw.Close(ctx) + + mo, err := NewMaintenanceOperation( + ctx, + control.Defaults(), + kw, + repository.Maintenance{ + Type: repository.MetadataMaintenance, + }, + evmock.NewBus()) + require.NoError(t, err, clues.ToCore(err)) + + err = mo.Run(ctx) + assert.NoError(t, err, clues.ToCore(err)) +} diff --git a/src/internal/operations/manifests.go b/src/internal/operations/manifests.go index a402808f2..16e2029f9 100644 --- a/src/internal/operations/manifests.go +++ b/src/internal/operations/manifests.go @@ -308,7 +308,7 @@ func collectMetadata( tenantID string, errs *fault.Bus, ) ([]data.RestoreCollection, error) { - paths := []path.Path{} + paths := []path.RestorePaths{} for _, fn := range fileNames { for _, reason := range man.Reasons { @@ -326,7 +326,14 @@ func collectMetadata( With("metadata_file", fn, "category", reason.Category) } - paths = append(paths, p) + dir, err := p.Dir() + if err != nil { + return nil, clues. + Wrap(err, "building metadata collection path"). 
+ With("metadata_file", fn, "category", reason.Category) + } + + paths = append(paths, path.RestorePaths{StoragePath: p, RestorePath: dir}) } } diff --git a/src/internal/operations/manifests_test.go b/src/internal/operations/manifests_test.go index aa481ade7..ccef6e248 100644 --- a/src/internal/operations/manifests_test.go +++ b/src/internal/operations/manifests_test.go @@ -140,7 +140,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { ps := make([]path.Path, 0, len(files)) for _, f := range files { - p, err := emailPath.Append(f, true) + p, err := emailPath.AppendItem(f) assert.NoError(t, err, clues.ToCore(err)) ps = append(ps, p) } @@ -163,7 +163,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { ps := make([]path.Path, 0, len(files)) for _, f := range files { - p, err := emailPath.Append(f, true) + p, err := emailPath.AppendItem(f) assert.NoError(t, err, clues.ToCore(err)) ps = append(ps, p) } @@ -191,10 +191,10 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { ps := make([]path.Path, 0, len(files)) for _, f := range files { - p, err := emailPath.Append(f, true) + p, err := emailPath.AppendItem(f) assert.NoError(t, err, clues.ToCore(err)) ps = append(ps, p) - p, err = contactPath.Append(f, true) + p, err = contactPath.AppendItem(f) assert.NoError(t, err, clues.ToCore(err)) ps = append(ps, p) } @@ -222,10 +222,10 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { ps := make([]path.Path, 0, len(files)) for _, f := range files { - p, err := emailPath.Append(f, true) + p, err := emailPath.AppendItem(f) assert.NoError(t, err, clues.ToCore(err)) ps = append(ps, p) - p, err = contactPath.Append(f, true) + p, err = contactPath.AppendItem(f) assert.NoError(t, err, clues.ToCore(err)) ps = append(ps, p) } diff --git a/src/internal/operations/operation_test.go b/src/internal/operations/operation_test.go index 27cf6185f..e95f942b5 100644 --- a/src/internal/operations/operation_test.go +++ b/src/internal/operations/operation_test.go @@ -25,7 +25,7 @@ func TestOperationSuite(t *testing.T) { func (suite *OperationSuite) TestNewOperation() { t := suite.T() - op := newOperation(control.Options{}, events.Bus{}, nil, nil) + op := newOperation(control.Defaults(), events.Bus{}, nil, nil) assert.Greater(t, op.CreatedAt, time.Time{}) } @@ -45,7 +45,7 @@ func (suite *OperationSuite) TestOperation_Validate() { } for _, test := range table { suite.Run(test.name, func() { - err := newOperation(control.Options{}, events.Bus{}, test.kw, test.sw).validate() + err := newOperation(control.Defaults(), events.Bus{}, test.kw, test.sw).validate() test.errCheck(suite.T(), err, clues.ToCore(err)) }) } diff --git a/src/internal/operations/pathtransformer/restore_path_transformer.go b/src/internal/operations/pathtransformer/restore_path_transformer.go new file mode 100644 index 000000000..8993328f3 --- /dev/null +++ b/src/internal/operations/pathtransformer/restore_path_transformer.go @@ -0,0 +1,181 @@ +package pathtransformer + +import ( + "context" + + "github.com/alcionai/clues" + + "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" +) + +func locationRef( + ent *details.Entry, + repoRef path.Path, + backupVersion int, +) (*path.Builder, error) { + loc := ent.LocationRef + + // At this backup version all data types should populate LocationRef. 
+ if len(loc) > 0 || backupVersion >= version.OneDrive7LocationRef { + return path.Builder{}.SplitUnescapeAppend(loc) + } + + // We could get an empty LocationRef either because it wasn't populated or it + // was in the root of the data type. + elems := repoRef.Folders() + + if ent.OneDrive != nil || ent.SharePoint != nil { + dp, err := path.ToDrivePath(repoRef) + if err != nil { + return nil, clues.Wrap(err, "fallback for LocationRef") + } + + elems = append([]string{dp.Root}, dp.Folders...) + } + + return path.Builder{}.Append(elems...), nil +} + +func basicLocationPath(repoRef path.Path, locRef *path.Builder) (path.Path, error) { + if len(locRef.Elements()) == 0 { + res, err := path.ServicePrefix( + repoRef.Tenant(), + repoRef.ResourceOwner(), + repoRef.Service(), + repoRef.Category()) + if err != nil { + return nil, clues.Wrap(err, "getting prefix for empty location") + } + + return res, nil + } + + return locRef.ToDataLayerPath( + repoRef.Tenant(), + repoRef.ResourceOwner(), + repoRef.Service(), + repoRef.Category(), + false) +} + +func drivePathMerge( + ent *details.Entry, + repoRef path.Path, + locRef *path.Builder, +) (path.Path, error) { + // Try getting the drive ID from the item. Not all details versions had it + // though. + var driveID string + + if ent.SharePoint != nil { + driveID = ent.SharePoint.DriveID + } else if ent.OneDrive != nil { + driveID = ent.OneDrive.DriveID + } + + // Fallback to trying to get from RepoRef. + if len(driveID) == 0 { + odp, err := path.ToDrivePath(repoRef) + if err != nil { + return nil, clues.Wrap(err, "fallback getting DriveID") + } + + driveID = odp.DriveID + } + + return basicLocationPath( + repoRef, + path.BuildDriveLocation(driveID, locRef.Elements()...)) +} + +func makeRestorePathsForEntry( + ctx context.Context, + backupVersion int, + ent *details.Entry, +) (path.RestorePaths, error) { + res := path.RestorePaths{} + + repoRef, err := path.FromDataLayerPath(ent.RepoRef, true) + if err != nil { + err = clues.Wrap(err, "parsing RepoRef"). + WithClues(ctx). + With("repo_ref", clues.Hide(ent.RepoRef), "location_ref", clues.Hide(ent.LocationRef)) + + return res, err + } + + res.StoragePath = repoRef + ctx = clues.Add(ctx, "repo_ref", repoRef) + + // Get the LocationRef so we can munge it onto our path. + locRef, err := locationRef(ent, repoRef, backupVersion) + if err != nil { + err = clues.Wrap(err, "parsing LocationRef after reduction"). + WithClues(ctx). + With("location_ref", clues.Hide(ent.LocationRef)) + + return res, err + } + + ctx = clues.Add(ctx, "location_ref", locRef) + + // Now figure out what type of ent it is and munge the path accordingly. + // Eventually we're going to need munging for: + // * Exchange Calendars (different folder handling) + // * Exchange Email/Contacts + // * OneDrive/SharePoint (needs drive information) + switch true { + case ent.Exchange != nil: + // TODO(ashmrtn): Eventually make Events have it's own function to handle + // setting the restore destination properly. 
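// Editor's illustration (hypothetical values): an Exchange mail entry whose
// LocationRef is "Inbox" restores to that folder rebuilt under the entry's
// tenant/service/owner/category prefix via basicLocationPath, while
// drive-backed OneDrive and SharePoint library entries instead go through
// drivePathMerge, which prepends the "drives/<driveID>" segments to the
// location folders.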
+ res.RestorePath, err = basicLocationPath(repoRef, locRef) + case ent.OneDrive != nil || + (ent.SharePoint != nil && ent.SharePoint.ItemType == details.SharePointLibrary) || + (ent.SharePoint != nil && ent.SharePoint.ItemType == details.OneDriveItem): + res.RestorePath, err = drivePathMerge(ent, repoRef, locRef) + default: + return res, clues.New("unknown entry type").WithClues(ctx) + } + + if err != nil { + return res, clues.Wrap(err, "generating RestorePath").WithClues(ctx) + } + + return res, nil +} + +// GetPaths takes a set of filtered details entries and returns a set of +// RestorePaths for the entries. +func GetPaths( + ctx context.Context, + backupVersion int, + items []*details.Entry, + errs *fault.Bus, +) ([]path.RestorePaths, error) { + var ( + paths = make([]path.RestorePaths, len(items)) + el = errs.Local() + ) + + for i, ent := range items { + if el.Failure() != nil { + break + } + + restorePaths, err := makeRestorePathsForEntry(ctx, backupVersion, ent) + if err != nil { + el.AddRecoverable(clues.Wrap(err, "getting restore paths")) + continue + } + + paths[i] = restorePaths + } + + logger.Ctx(ctx).Infof("found %d details entries to restore", len(paths)) + + return paths, el.Failure() +} diff --git a/src/internal/operations/pathtransformer/restore_path_transformer_test.go b/src/internal/operations/pathtransformer/restore_path_transformer_test.go new file mode 100644 index 000000000..57381c3cf --- /dev/null +++ b/src/internal/operations/pathtransformer/restore_path_transformer_test.go @@ -0,0 +1,340 @@ +package pathtransformer_test + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/operations/pathtransformer" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/backup/details/testdata" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" +) + +type RestorePathTransformerUnitSuite struct { + tester.Suite +} + +func TestRestorePathTransformerUnitSuite(t *testing.T) { + suite.Run(t, &RestorePathTransformerUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *RestorePathTransformerUnitSuite) TestGetPaths() { + type expectPaths struct { + storage string + restore string + isRestorePrefix bool + } + + toRestore := func( + repoRef path.Path, + unescapedFolders ...string, + ) string { + return path.Builder{}. + Append( + repoRef.Tenant(), + repoRef.Service().String(), + repoRef.ResourceOwner(), + repoRef.Category().String()). + Append(unescapedFolders...). + String() + } + + var ( + driveID = "some-drive-id" + extraItemName = "some-item" + SharePointRootItemPath = testdata.SharePointRootPath.MustAppend(extraItemName, true) + ) + + table := []struct { + name string + backupVersion int + input []*details.Entry + expectErr assert.ErrorAssertionFunc + expected []expectPaths + }{ + { + name: "SharePoint List Errors", + // No version bump for the change so we always have to check for this. 
+ backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: SharePointRootItemPath.RR.String(), + LocationRef: SharePointRootItemPath.Loc.String(), + ItemInfo: details.ItemInfo{ + SharePoint: &details.SharePointInfo{ + ItemType: details.SharePointList, + }, + }, + }, + }, + expectErr: assert.Error, + }, + { + name: "SharePoint Page Errors", + // No version bump for the change so we always have to check for this. + backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: SharePointRootItemPath.RR.String(), + LocationRef: SharePointRootItemPath.Loc.String(), + ItemInfo: details.ItemInfo{ + SharePoint: &details.SharePointInfo{ + ItemType: details.SharePointPage, + }, + }, + }, + }, + expectErr: assert.Error, + }, + { + name: "SharePoint old format, item in root", + // No version bump for the change so we always have to check for this. + backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: SharePointRootItemPath.RR.String(), + LocationRef: SharePointRootItemPath.Loc.String(), + ItemInfo: details.ItemInfo{ + SharePoint: &details.SharePointInfo{ + ItemType: details.OneDriveItem, + DriveID: driveID, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: SharePointRootItemPath.RR.String(), + restore: toRestore( + SharePointRootItemPath.RR, + append( + []string{"drives", driveID}, + SharePointRootItemPath.Loc.Elements()...)...), + }, + }, + }, + { + name: "SharePoint, no LocationRef, no DriveID, item in root", + backupVersion: version.OneDrive6NameInMeta, + input: []*details.Entry{ + { + RepoRef: SharePointRootItemPath.RR.String(), + ItemInfo: details.ItemInfo{ + SharePoint: &details.SharePointInfo{ + ItemType: details.SharePointLibrary, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: SharePointRootItemPath.RR.String(), + restore: toRestore( + SharePointRootItemPath.RR, + append( + []string{"drives"}, + // testdata path has '.d' on the drives folder we need to remove. 
+ SharePointRootItemPath.RR.Folders()[1:]...)...), + }, + }, + }, + { + name: "OneDrive, nested item", + backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: testdata.OneDriveItemPath2.RR.String(), + LocationRef: testdata.OneDriveItemPath2.Loc.String(), + ItemInfo: details.ItemInfo{ + OneDrive: &details.OneDriveInfo{ + ItemType: details.OneDriveItem, + DriveID: driveID, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: testdata.OneDriveItemPath2.RR.String(), + restore: toRestore( + testdata.OneDriveItemPath2.RR, + append( + []string{"drives", driveID}, + testdata.OneDriveItemPath2.Loc.Elements()...)...), + }, + }, + }, + { + name: "Exchange Email, extra / in path", + backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: testdata.ExchangeEmailItemPath3.RR.String(), + LocationRef: testdata.ExchangeEmailItemPath3.Loc.String(), + ItemInfo: details.ItemInfo{ + Exchange: &details.ExchangeInfo{ + ItemType: details.ExchangeMail, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: testdata.ExchangeEmailItemPath3.RR.String(), + restore: toRestore( + testdata.ExchangeEmailItemPath3.RR, + testdata.ExchangeEmailItemPath3.Loc.Elements()...), + }, + }, + }, + { + name: "Exchange Email, no LocationRef, extra / in path", + backupVersion: version.OneDrive7LocationRef, + input: []*details.Entry{ + { + RepoRef: testdata.ExchangeEmailItemPath3.RR.String(), + LocationRef: testdata.ExchangeEmailItemPath3.Loc.String(), + ItemInfo: details.ItemInfo{ + Exchange: &details.ExchangeInfo{ + ItemType: details.ExchangeMail, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: testdata.ExchangeEmailItemPath3.RR.String(), + restore: toRestore( + testdata.ExchangeEmailItemPath3.RR, + testdata.ExchangeEmailItemPath3.Loc.Elements()...), + }, + }, + }, + { + name: "Exchange Contact", + backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: testdata.ExchangeContactsItemPath1.RR.String(), + LocationRef: testdata.ExchangeContactsItemPath1.Loc.String(), + ItemInfo: details.ItemInfo{ + Exchange: &details.ExchangeInfo{ + ItemType: details.ExchangeContact, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: testdata.ExchangeContactsItemPath1.RR.String(), + restore: toRestore( + testdata.ExchangeContactsItemPath1.RR, + testdata.ExchangeContactsItemPath1.Loc.Elements()...), + }, + }, + }, + { + name: "Exchange Contact, root dir", + backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: testdata.ExchangeContactsItemPath1.RR.String(), + ItemInfo: details.ItemInfo{ + Exchange: &details.ExchangeInfo{ + ItemType: details.ExchangeContact, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: testdata.ExchangeContactsItemPath1.RR.String(), + restore: toRestore(testdata.ExchangeContactsItemPath1.RR, "tmp"), + isRestorePrefix: true, + }, + }, + }, + { + name: "Exchange Event", + backupVersion: version.All8MigrateUserPNToID, + input: []*details.Entry{ + { + RepoRef: testdata.ExchangeEmailItemPath3.RR.String(), + LocationRef: testdata.ExchangeEmailItemPath3.Loc.String(), + ItemInfo: details.ItemInfo{ + Exchange: &details.ExchangeInfo{ + ItemType: details.ExchangeMail, + }, + }, + }, + }, + expectErr: assert.NoError, + expected: []expectPaths{ + { + storage: testdata.ExchangeEmailItemPath3.RR.String(), + restore: toRestore( 
+ testdata.ExchangeEmailItemPath3.RR, + testdata.ExchangeEmailItemPath3.Loc.Elements()...), + }, + }, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + paths, err := pathtransformer.GetPaths( + ctx, + test.backupVersion, + test.input, + fault.New(true)) + test.expectErr(t, err, clues.ToCore(err)) + + if err != nil { + return + } + + expected := make([]path.RestorePaths, 0, len(test.expected)) + + for _, e := range test.expected { + tmp := path.RestorePaths{} + p, err := path.FromDataLayerPath(e.storage, true) + require.NoError(t, err, "parsing expected storage path", clues.ToCore(err)) + + tmp.StoragePath = p + + p, err = path.FromDataLayerPath(e.restore, false) + require.NoError(t, err, "parsing expected restore path", clues.ToCore(err)) + + if e.isRestorePrefix { + p, err = p.Dir() + require.NoError(t, err, "getting service prefix", clues.ToCore(err)) + } + + tmp.RestorePath = p + + expected = append(expected, tmp) + } + + assert.ElementsMatch(t, expected, paths) + }) + } +} diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index f11b3e56b..55103cec7 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -8,8 +8,8 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" - "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common/crash" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" @@ -18,6 +18,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/operations/inject" + "github.com/alcionai/corso/src/internal/operations/pathtransformer" "github.com/alcionai/corso/src/internal/stats" "github.com/alcionai/corso/src/internal/streamstore" "github.com/alcionai/corso/src/pkg/account" @@ -104,7 +105,7 @@ type restoreStats struct { // Run begins a synchronous restore operation. func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) { defer func() { - if crErr := crash.Recovery(ctx, recover()); crErr != nil { + if crErr := crash.Recovery(ctx, recover(), "restore"); crErr != nil { err = crErr } }() @@ -139,6 +140,25 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De "service", op.Selectors.Service, "destination_container", clues.Hide(op.Destination.ContainerName)) + defer func() { + op.bus.Event( + ctx, + events.RestoreEnd, + map[string]any{ + events.BackupID: op.BackupID, + events.DataRetrieved: op.Results.BytesRead, + events.Duration: op.Results.CompletedAt.Sub(op.Results.StartedAt), + events.EndTime: dttm.Format(op.Results.CompletedAt), + events.ItemsRead: op.Results.ItemsRead, + events.ItemsWritten: op.Results.ItemsWritten, + events.Resources: op.Results.ResourceOwners, + events.RestoreID: opStats.restoreID, + events.Service: op.Selectors.Service.String(), + events.StartTime: dttm.Format(op.Results.StartedAt), + events.Status: op.Status.String(), + }) + }() + // ----- // Execution // ----- @@ -146,9 +166,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De deets, err := op.do(ctx, &opStats, sstore, start) if err != nil { // No return here! We continue down to persistResults, even in case of failure. - logger.Ctx(ctx). 
- With("err", err). - Errorw("running restore", clues.InErr(err).Slice()...) + logger.CtxErr(ctx, err).Error("running restore") op.Errors.Fail(clues.Wrap(err, "running restore")) } @@ -212,7 +230,7 @@ func (op *RestoreOperation) do( }) observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID)) - logger.Ctx(ctx).With("selectors", op.Selectors).Info("restoring selection") + logger.Ctx(ctx).With("control_options", op.Options, "selectors", op.Selectors).Info("restoring selection") kopiaComplete, closer := observe.MessageWithCompletion(ctx, "Enumerating items in repository") defer closer() @@ -282,24 +300,6 @@ func (op *RestoreOperation) persistResults( op.Results.ItemsWritten = opStats.gc.Successes - op.bus.Event( - ctx, - events.RestoreEnd, - map[string]any{ - events.BackupID: op.BackupID, - events.DataRetrieved: op.Results.BytesRead, - events.Duration: op.Results.CompletedAt.Sub(op.Results.StartedAt), - events.EndTime: common.FormatTime(op.Results.CompletedAt), - events.ItemsRead: op.Results.ItemsRead, - events.ItemsWritten: op.Results.ItemsWritten, - events.Resources: op.Results.ResourceOwners, - events.RestoreID: opStats.restoreID, - events.Service: op.Selectors.Service.String(), - events.StartTime: common.FormatTime(op.Results.StartedAt), - events.Status: op.Status.String(), - }, - ) - return op.Errors.Failure() } @@ -349,46 +349,23 @@ func formatDetailsForRestoration( sel selectors.Selector, deets *details.Details, errs *fault.Bus, -) ([]path.Path, error) { +) ([]path.RestorePaths, error) { fds, err := sel.Reduce(ctx, deets, errs) if err != nil { return nil, err } - var ( - fdsPaths = fds.Paths() - paths = make([]path.Path, len(fdsPaths)) - shortRefs = make([]string, len(fdsPaths)) - el = errs.Local() - ) - - for i := range fdsPaths { - if el.Failure() != nil { - break - } - - p, err := path.FromDataLayerPath(fdsPaths[i], true) - if err != nil { - el.AddRecoverable(clues. - Wrap(err, "parsing details path after reduction"). - WithMap(clues.In(ctx)). 
- With("path", fdsPaths[i])) - - continue - } - - paths[i] = p - shortRefs[i] = p.ShortRef() + paths, err := pathtransformer.GetPaths(ctx, backupVersion, fds.Items(), errs) + if err != nil { + return nil, clues.Wrap(err, "getting restore paths") } - if sel.Service == selectors.ServiceOneDrive { + if sel.Service == selectors.ServiceOneDrive || sel.Service == selectors.ServiceSharePoint { paths, err = onedrive.AugmentRestorePaths(backupVersion, paths) if err != nil { return nil, clues.Wrap(err, "augmenting paths") } } - logger.Ctx(ctx).With("short_refs", shortRefs).Infof("found %d details entries to restore", len(shortRefs)) - - return paths, el.Failure() + return paths, nil } diff --git a/src/internal/operations/restore_test.go b/src/internal/operations/restore_test.go index 9b00e122e..e35d47ffc 100644 --- a/src/internal/operations/restore_test.go +++ b/src/internal/operations/restore_test.go @@ -10,13 +10,13 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/connector" "github.com/alcionai/corso/src/internal/connector/exchange" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/mock" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/events" evmock "github.com/alcionai/corso/src/internal/events/mock" @@ -27,8 +27,9 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/store" ) @@ -54,7 +55,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() { gc = &mock.GraphConnector{} acct = account.Account{} now = time.Now() - dest = tester.DefaultTestRestoreDestination() + dest = tester.DefaultTestRestoreDestination("") ) table := []struct { @@ -107,7 +108,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() { op, err := NewRestoreOperation( ctx, - control.Options{}, + control.Defaults(), kw, sw, gc, @@ -175,7 +176,7 @@ func (suite *RestoreOpIntegrationSuite) SetupSuite() { suite.acct = tester.NewM365Account(t) - err := k.Initialize(ctx) + err := k.Initialize(ctx, repository.Options{}) require.NoError(t, err, clues.ToCore(err)) suite.kopiaCloser = func(ctx context.Context) { @@ -214,15 +215,17 @@ func (suite *RestoreOpIntegrationSuite) TearDownSuite() { } func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() { - kw := &kopia.Wrapper{} - sw := &store.Wrapper{} - gc := &mock.GraphConnector{} - acct := tester.NewM365Account(suite.T()) - dest := tester.DefaultTestRestoreDestination() + var ( + kw = &kopia.Wrapper{} + sw = &store.Wrapper{} + gc = &mock.GraphConnector{} + acct = tester.NewM365Account(suite.T()) + dest = tester.DefaultTestRestoreDestination("") + opts = control.Defaults() + ) table := []struct { name string - opts control.Options kw *kopia.Wrapper sw *store.Wrapper rc inject.RestoreConsumer @@ -230,10 +233,10 @@ func (suite 
*RestoreOpIntegrationSuite) TestNewRestoreOperation() { targets []string errCheck assert.ErrorAssertionFunc }{ - {"good", control.Options{}, kw, sw, gc, acct, nil, assert.NoError}, - {"missing kopia", control.Options{}, nil, sw, gc, acct, nil, assert.Error}, - {"missing modelstore", control.Options{}, kw, nil, gc, acct, nil, assert.Error}, - {"missing restore consumer", control.Options{}, kw, sw, nil, acct, nil, assert.Error}, + {"good", kw, sw, gc, acct, nil, assert.NoError}, + {"missing kopia", nil, sw, gc, acct, nil, assert.Error}, + {"missing modelstore", kw, nil, gc, acct, nil, assert.Error}, + {"missing restore consumer", kw, sw, nil, acct, nil, assert.Error}, } for _, test := range table { suite.Run(test.name, func() { @@ -242,7 +245,7 @@ func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() { _, err := NewRestoreOperation( ctx, - test.opts, + opts, test.kw, test.sw, test.rc, @@ -268,37 +271,26 @@ func setupExchangeBackup( var ( users = []string{owner} - bsel = selectors.NewExchangeBackup(users) + esel = selectors.NewExchangeBackup(users) ) - gc, err := connector.NewGraphConnector( - ctx, - acct, - connector.Users, - fault.New(true)) - require.NoError(t, err, clues.ToCore(err)) + esel.DiscreteOwner = owner + esel.Include( + esel.MailFolders([]string{exchange.DefaultMailFolder}, selectors.PrefixMatch()), + esel.ContactFolders([]string{exchange.DefaultContactFolder}, selectors.PrefixMatch()), + esel.EventCalendars([]string{exchange.DefaultCalendar}, selectors.PrefixMatch())) - id, name, err := gc.PopulateOwnerIDAndNamesFrom(ctx, owner, nil) - require.NoError(t, err, clues.ToCore(err)) - - bsel.DiscreteOwner = owner - bsel.Include( - bsel.MailFolders([]string{exchange.DefaultMailFolder}, selectors.PrefixMatch()), - bsel.ContactFolders([]string{exchange.DefaultContactFolder}, selectors.PrefixMatch()), - bsel.EventCalendars([]string{exchange.DefaultCalendar}, selectors.PrefixMatch()), - ) - - bsel.SetDiscreteOwnerIDName(id, name) + gc, sel := GCWithSelector(t, ctx, acct, connector.Users, esel.Selector, nil, nil) bo, err := NewBackupOperation( ctx, - control.Options{}, + control.Defaults(), kw, sw, gc, acct, - bsel.Selector, - bsel.Selector, + sel, + inMock.NewProvider(owner, owner), evmock.NewBus()) require.NoError(t, err, clues.ToCore(err)) @@ -329,37 +321,27 @@ func setupSharePointBackup( var ( sites = []string{owner} - spsel = selectors.NewSharePointBackup(sites) + ssel = selectors.NewSharePointBackup(sites) ) - gc, err := connector.NewGraphConnector( - ctx, - acct, - connector.Sites, - fault.New(true)) - require.NoError(t, err, clues.ToCore(err)) - - id, name, err := gc.PopulateOwnerIDAndNamesFrom(ctx, owner, nil) - require.NoError(t, err, clues.ToCore(err)) - - spsel.DiscreteOwner = owner // assume a folder name "test" exists in the drive. // this is brittle, and requires us to backfill anytime // the site under test changes, but also prevents explosive // growth from re-backup/restore of restored files. 
- spsel.Include(spsel.LibraryFolders([]string{"test"}, selectors.PrefixMatch())) + ssel.Include(ssel.LibraryFolders([]string{"test"}, selectors.PrefixMatch())) + ssel.DiscreteOwner = owner - spsel.SetDiscreteOwnerIDName(id, name) + gc, sel := GCWithSelector(t, ctx, acct, connector.Sites, ssel.Selector, nil, nil) bo, err := NewBackupOperation( ctx, - control.Options{}, + control.Defaults(), kw, sw, gc, acct, - spsel.Selector, - spsel.Selector, + sel, + inMock.NewProvider(owner, owner), evmock.NewBus()) require.NoError(t, err, clues.ToCore(err)) @@ -410,7 +392,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() { { name: "Exchange_Restore", owner: tester.M365UserID(suite.T()), - dest: tester.DefaultTestRestoreDestination(), + dest: tester.DefaultTestRestoreDestination(""), getSelector: func(t *testing.T, owners []string) selectors.Selector { rsel := selectors.NewExchangeRestore(owners) rsel.Include(rsel.AllData()) @@ -422,7 +404,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() { { name: "SharePoint_Restore", owner: tester.M365SiteID(suite.T()), - dest: control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive), + dest: control.DefaultRestoreDestination(dttm.SafeForTesting), getSelector: func(t *testing.T, owners []string) selectors.Selector { rsel := selectors.NewSharePointRestore(owners) rsel.Include(rsel.AllData()) @@ -476,13 +458,13 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() { } } -func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoResults() { +func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoBackup() { ctx, flush := tester.NewContext() defer flush() var ( t = suite.T() - dest = tester.DefaultTestRestoreDestination() + dest = tester.DefaultTestRestoreDestination("") mb = evmock.NewBus() ) @@ -492,13 +474,12 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoResults() { gc, err := connector.NewGraphConnector( ctx, suite.acct, - connector.Users, - fault.New(true)) + connector.Users) require.NoError(t, err, clues.ToCore(err)) ro, err := NewRestoreOperation( ctx, - control.Options{}, + control.Defaults(), suite.kw, suite.sw, gc, @@ -514,6 +495,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoResults() { require.Nil(t, ds, "restoreOp.Run() should not produce details") assert.Zero(t, ro.Results.ResourceOwners, "resource owners") assert.Zero(t, ro.Results.BytesRead, "bytes read") - assert.Zero(t, mb.TimesCalled[events.RestoreStart], "restore-start events") - assert.Zero(t, mb.TimesCalled[events.RestoreEnd], "restore-end events") + // no restore start, because we'd need to find the backup first. 
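// (restore-end, by contrast, fires exactly once: Run now registers the
// RestoreEnd bus event in a defer before looking up the backup, so it is
// emitted even when the operation fails early.)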
+ assert.Equal(t, 0, mb.TimesCalled[events.RestoreStart], "restore-start events") + assert.Equal(t, 1, mb.TimesCalled[events.RestoreEnd], "restore-end events") } diff --git a/src/internal/streamstore/collectables_test.go b/src/internal/streamstore/collectables_test.go index 8c146e3a6..8cd45aea9 100644 --- a/src/internal/streamstore/collectables_test.go +++ b/src/internal/streamstore/collectables_test.go @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" ) @@ -41,7 +42,7 @@ func (suite *StreamStoreIntgSuite) SetupSubTest() { st := tester.NewPrefixedS3Storage(t) k := kopia.NewConn(st) - require.NoError(t, k.Initialize(ctx)) + require.NoError(t, k.Initialize(ctx, repository.Options{})) suite.kcloser = func() { k.Close(ctx) } @@ -107,8 +108,8 @@ func (suite *StreamStoreIntgSuite) TestStreamer() { bus := fault.New(false) bus.Fail(clues.New("foo")) bus.AddRecoverable(clues.New("bar")) - bus.AddRecoverable(fault.FileErr(clues.New("file"), "file-id", "file-name", map[string]any{"foo": "bar"})) - bus.AddSkip(fault.FileSkip(fault.SkipMalware, "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddRecoverable(fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddSkip(fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) fe := bus.Errors() return fe @@ -136,8 +137,8 @@ func (suite *StreamStoreIntgSuite) TestStreamer() { bus := fault.New(false) bus.Fail(clues.New("foo")) bus.AddRecoverable(clues.New("bar")) - bus.AddRecoverable(fault.FileErr(clues.New("file"), "file-id", "file-name", map[string]any{"foo": "bar"})) - bus.AddSkip(fault.FileSkip(fault.SkipMalware, "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddRecoverable(fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddSkip(fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) fe := bus.Errors() return fe diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index 57fe5b8f1..146f0d1c7 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -9,6 +9,7 @@ import ( "github.com/alcionai/clues" + "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/internal/operations/inject" @@ -232,7 +233,7 @@ func write( ctx, nil, dbcs, - nil, + prefixmatcher.NopReader[map[string]struct{}](), nil, false, errs) @@ -261,12 +262,22 @@ func read( return clues.Stack(err).WithClues(ctx) } + pd, err := p.Dir() + if err != nil { + return clues.Stack(err).WithClues(ctx) + } + ctx = clues.Add(ctx, "snapshot_id", snapshotID) cs, err := rer.ProduceRestoreCollections( ctx, snapshotID, - []path.Path{p}, + []path.RestorePaths{ + { + StoragePath: p, + RestorePath: pd, + }, + }, &stats.ByteCounter{}, errs) if err != nil { diff --git a/src/internal/tester/cli.go b/src/internal/tester/cli.go index cee5a8f0f..925233d2e 100644 --- a/src/internal/tester/cli.go +++ b/src/internal/tester/cli.go @@ -9,7 +9,7 @@ import ( "github.com/google/uuid" "github.com/spf13/cobra" - "github.com/alcionai/corso/src/internal/common" 
+ "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/logger" ) @@ -17,7 +17,7 @@ import ( // the root command for integration testing on the CLI func StubRootCmd(args ...string) *cobra.Command { id := uuid.NewString() - now := common.FormatTime(time.Now()) + now := dttm.Format(time.Now()) cmdArg := "testing-corso" c := &cobra.Command{ Use: cmdArg, @@ -34,21 +34,32 @@ func StubRootCmd(args ...string) *cobra.Command { } func NewContext() (context.Context, func()) { - level := logger.Info + level := logger.LLInfo + format := logger.LFText for _, a := range os.Args { if a == "-test.v=true" { - level = logger.Development + level = logger.LLDebug } } + ls := logger.Settings{ + Level: level, + Format: format, + } + //nolint:forbidigo - ctx, _ := logger.SeedLevel(context.Background(), level) + ctx, _ := logger.CtxOrSeed(context.Background(), ls) return ctx, func() { logger.Flush(ctx) } } func WithContext(ctx context.Context) (context.Context, func()) { - ctx, _ = logger.SeedLevel(ctx, logger.Development) + ls := logger.Settings{ + Level: logger.LLDebug, + Format: logger.LFText, + } + ctx, _ = logger.CtxOrSeed(ctx, ls) + return ctx, func() { logger.Flush(ctx) } } diff --git a/src/internal/tester/config.go b/src/internal/tester/config.go index 8a002fd2c..14a4f54d9 100644 --- a/src/internal/tester/config.go +++ b/src/internal/tester/config.go @@ -106,7 +106,7 @@ func readTestConfig() (map[string]string, error) { testEnv := map[string]string{} fallbackTo(testEnv, TestCfgStorageProvider, vpr.GetString(TestCfgStorageProvider)) fallbackTo(testEnv, TestCfgAccountProvider, vpr.GetString(TestCfgAccountProvider)) - fallbackTo(testEnv, TestCfgBucket, vpr.GetString(TestCfgBucket), "test-corso-repo-init") + fallbackTo(testEnv, TestCfgBucket, os.Getenv("S3_BUCKET"), vpr.GetString(TestCfgBucket), "test-corso-repo-init") fallbackTo(testEnv, TestCfgEndpoint, vpr.GetString(TestCfgEndpoint), "s3.amazonaws.com") fallbackTo(testEnv, TestCfgPrefix, vpr.GetString(TestCfgPrefix)) fallbackTo(testEnv, TestCfgAzureTenantID, os.Getenv(account.AzureTenantID), vpr.GetString(TestCfgAzureTenantID)) diff --git a/src/internal/tester/resource_owners.go b/src/internal/tester/resource_owners.go index c39b39151..fb8a75837 100644 --- a/src/internal/tester/resource_owners.go +++ b/src/internal/tester/resource_owners.go @@ -184,3 +184,16 @@ func M365SiteURL(t *testing.T) string { return strings.ToLower(cfg[TestCfgSiteURL]) } + +// GetM365SiteID returns a siteID string representing the m365SitteID described +// by either the env var CORSO_M365_TEST_SITE_ID, the corso_test.toml config +// file or the default value (in that order of priority). The default is a +// last-attempt fallback that will only work on alcion's testing org. 
+func GetM365SiteID(ctx context.Context) string { + cfg, err := readTestConfig() + if err != nil { + logger.Ctx(ctx).Error(err, "retrieving m365 user id from test configuration") + } + + return strings.ToLower(cfg[TestCfgSiteID]) +} diff --git a/src/internal/tester/restore_destination.go b/src/internal/tester/restore_destination.go index e6224b8cc..af247258d 100644 --- a/src/internal/tester/restore_destination.go +++ b/src/internal/tester/restore_destination.go @@ -1,11 +1,26 @@ package tester import ( - "github.com/alcionai/corso/src/internal/common" + "strings" + + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/control" ) -func DefaultTestRestoreDestination() control.RestoreDestination { - // Use microsecond granularity to help reduce collisions. - return control.DefaultRestoreDestination(common.SimpleTimeTesting) +const RestoreFolderPrefix = "Corso_Test" + +func DefaultTestRestoreDestination(namespace string) control.RestoreDestination { + var ( + dest = control.DefaultRestoreDestination(dttm.SafeForTesting) + sft = dttm.FormatNow(dttm.SafeForTesting) + ) + + parts := []string{RestoreFolderPrefix, namespace, sft} + if len(namespace) == 0 { + parts = []string{RestoreFolderPrefix, sft} + } + + dest.ContainerName = strings.Join(parts, "_") + + return dest } diff --git a/src/internal/version/backup.go b/src/internal/version/backup.go index 29e697bd8..7dbcc6718 100644 --- a/src/internal/version/backup.go +++ b/src/internal/version/backup.go @@ -1,6 +1,6 @@ package version -const Backup = 7 +const Backup = 8 // Various labels to refer to important version changes. // Labels don't need 1:1 service:version representation. Add a new @@ -9,6 +9,9 @@ const Backup = 7 // Labels should state their application, the backup version number, // and the colloquial purpose of the label. const ( + // NoBackup should be used when we cannot find, or do not supply, prior backup metadata. + NoBackup = -1 + // OneDrive1DataAndMetaFiles is the corso backup format version // in which we split from storing just the data to storing both // the data and metadata in two files. @@ -39,4 +42,13 @@ const ( // OneDriveXLocationRef provides LocationRef information for Exchange, // OneDrive, and SharePoint libraries. OneDrive7LocationRef = 7 + + // All8MigrateUserPNToID marks when we migrated repo refs from the user's + // PrincipalName to their ID for stability. + All8MigrateUserPNToID = 8 ) + +// IsNoBackup returns true if the version implies that no prior backup exists. 
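//
// Illustrative gate (editor's sketch; lastBackupVersion is whatever value the
// caller threads through ProduceBackupCollections, with version.NoBackup used
// when no prior backup metadata exists):
//
//	if version.IsNoBackup(lastBackupVersion) {
//		// nothing to diff against; treat this as a full, non-incremental pass
//	}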
+func IsNoBackup(version int) bool { + return version <= NoBackup +} diff --git a/src/pkg/backup/backup.go b/src/pkg/backup/backup.go index d9b52c9d3..352015203 100644 --- a/src/pkg/backup/backup.go +++ b/src/pkg/backup/backup.go @@ -3,14 +3,16 @@ package backup import ( "context" "fmt" + "strconv" "strings" "time" + "github.com/dustin/go-humanize" + "github.com/alcionai/corso/src/cli/print" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/stats" - "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -64,6 +66,7 @@ var _ print.Printable = &Backup{} func New( snapshotID, streamStoreID, status string, + version int, id model.StableID, selector selectors.Selector, ownerID, ownerName string, @@ -113,7 +116,7 @@ func New( ResourceOwnerID: ownerID, ResourceOwnerName: ownerName, - Version: version.Backup, + Version: version, SnapshotID: snapshotID, StreamStoreID: streamStoreID, @@ -141,6 +144,8 @@ func New( // CLI Output // -------------------------------------------------------------------------------- +// ----- print backups + // Print writes the Backup to StdOut, in the format requested by the caller. func (b Backup) Print(ctx context.Context) { print.Item(ctx, b) @@ -162,36 +167,36 @@ func PrintAll(ctx context.Context, bs []*Backup) { } type Printable struct { - ID model.StableID `json:"id"` - ErrorCount int `json:"errorCount"` - StartedAt time.Time `json:"started at"` - Status string `json:"status"` - Version string `json:"version"` - BytesRead int64 `json:"bytesRead"` - BytesUploaded int64 `json:"bytesUploaded"` - Owner string `json:"owner"` + ID model.StableID `json:"id"` + Status string `json:"status"` + Version string `json:"version"` + Owner string `json:"owner"` + Stats backupStats `json:"stats"` +} + +// ToPrintable reduces the Backup to its minimally printable details. +func (b Backup) ToPrintable() Printable { + return Printable{ + ID: b.ID, + Status: b.Status, + Version: "0", + Owner: b.Selector.DiscreteOwner, + Stats: b.toStats(), + } } // MinimumPrintable reduces the Backup to its minimally printable details. func (b Backup) MinimumPrintable() any { - return Printable{ - ID: b.ID, - ErrorCount: b.ErrorCount, - StartedAt: b.StartedAt, - Status: b.Status, - Version: "0", - BytesRead: b.BytesRead, - BytesUploaded: b.BytesUploaded, - Owner: b.Selector.DiscreteOwner, - } + return b.ToPrintable() } // Headers returns the human-readable names of properties in a Backup // for printing out to a terminal in a columnar display. 
func (b Backup) Headers() []string { return []string{ - "Started At", "ID", + "Started At", + "Duration", "Status", "Resource Owner", } @@ -255,10 +260,78 @@ func (b Backup) Values() []string { name = b.Selector.DiscreteOwner } + bs := b.toStats() + return []string{ - common.FormatTabularDisplayTime(b.StartedAt), string(b.ID), + dttm.FormatToTabularDisplay(b.StartedAt), + bs.EndedAt.Sub(bs.StartedAt).String(), status, name, } } + +// ----- print backup stats + +func (b Backup) toStats() backupStats { + return backupStats{ + ID: string(b.ID), + BytesRead: b.BytesRead, + BytesUploaded: b.BytesUploaded, + EndedAt: b.CompletedAt, + ErrorCount: b.ErrorCount, + ItemsRead: b.ItemsRead, + ItemsSkipped: b.TotalSkippedItems, + ItemsWritten: b.ItemsWritten, + StartedAt: b.StartedAt, + } +} + +// interface compliance checks +var _ print.Printable = &backupStats{} + +type backupStats struct { + ID string `json:"id"` + BytesRead int64 `json:"bytesRead"` + BytesUploaded int64 `json:"bytesUploaded"` + EndedAt time.Time `json:"endedAt"` + ErrorCount int `json:"errorCount"` + ItemsRead int `json:"itemsRead"` + ItemsSkipped int `json:"itemsSkipped"` + ItemsWritten int `json:"itemsWritten"` + StartedAt time.Time `json:"startedAt"` +} + +// Print writes the Backup to StdOut, in the format requested by the caller. +func (bs backupStats) Print(ctx context.Context) { + print.Item(ctx, bs) +} + +// MinimumPrintable reduces the Backup to its minimally printable details. +func (bs backupStats) MinimumPrintable() any { + return bs +} + +// Headers returns the human-readable names of properties in a Backup +// for printing out to a terminal in a columnar display. +func (bs backupStats) Headers() []string { + return []string{ + "ID", + "Bytes Uploaded", + "Items Uploaded", + "Items Skipped", + "Errors", + } +} + +// Values returns the values matching the Headers list for printing +// out to a terminal in a columnar display. 
+func (bs backupStats) Values() []string { + return []string{ + bs.ID, + humanize.Bytes(uint64(bs.BytesUploaded)), + strconv.Itoa(bs.ItemsWritten), + strconv.Itoa(bs.ItemsSkipped), + strconv.Itoa(bs.ErrorCount), + } +} diff --git a/src/pkg/backup/backup_test.go b/src/pkg/backup/backup_test.go index 91bde1a17..74ab35fe0 100644 --- a/src/pkg/backup/backup_test.go +++ b/src/pkg/backup/backup_test.go @@ -1,14 +1,16 @@ package backup_test import ( + "strconv" "testing" "time" + "github.com/dustin/go-humanize" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/stats" "github.com/alcionai/corso/src/internal/tester" @@ -50,7 +52,7 @@ func stubBackup(t time.Time, ownerID, ownerName string) backup.Backup { }, StartAndEndTime: stats.StartAndEndTime{ StartedAt: t, - CompletedAt: t, + CompletedAt: t.Add(1 * time.Minute), }, SkippedCounts: stats.SkippedCounts{ TotalSkippedItems: 1, @@ -63,22 +65,27 @@ func (suite *BackupUnitSuite) TestBackup_HeadersValues() { var ( t = suite.T() now = time.Now() + later = now.Add(1 * time.Minute) b = stubBackup(now, "id", "name") expectHs = []string{ - "Started At", "ID", + "Started At", + "Duration", "Status", "Resource Owner", } - nowFmt = common.FormatTabularDisplayTime(now) + nowFmt = dttm.FormatToTabularDisplay(now) expectVs = []string{ - nowFmt, "id", + nowFmt, + "1m0s", "status (2 errors, 1 skipped: 1 malware)", "test", } ) + b.StartAndEndTime.CompletedAt = later + // single skipped malware hs := b.Headers() assert.Equal(t, expectHs, hs) @@ -182,7 +189,7 @@ func (suite *BackupUnitSuite) TestBackup_Values_statusVariations() { for _, test := range table { suite.Run(test.name, func() { result := test.bup.Values() - assert.Equal(suite.T(), test.expect, result[2], "status value") + assert.Equal(suite.T(), test.expect, result[3], "status value") }) } } @@ -197,10 +204,57 @@ func (suite *BackupUnitSuite) TestBackup_MinimumPrintable() { require.True(t, ok) assert.Equal(t, b.ID, result.ID, "id") - assert.Equal(t, 2, result.ErrorCount, "error count") - assert.Equal(t, now, result.StartedAt, "started at") + assert.Equal(t, 2, result.Stats.ErrorCount, "error count") + assert.Equal(t, now, result.Stats.StartedAt, "started at") assert.Equal(t, b.Status, result.Status, "status") - assert.Equal(t, b.BytesRead, result.BytesRead, "size") - assert.Equal(t, b.BytesUploaded, result.BytesUploaded, "stored size") + assert.Equal(t, b.BytesRead, result.Stats.BytesRead, "size") + assert.Equal(t, b.BytesUploaded, result.Stats.BytesUploaded, "stored size") assert.Equal(t, b.Selector.DiscreteOwner, result.Owner, "owner") } + +func (suite *BackupUnitSuite) TestStats() { + var ( + t = suite.T() + start = time.Now() + b = stubBackup(start, "owner", "ownername") + s = b.ToPrintable().Stats + ) + + assert.Equal(t, b.BytesRead, s.BytesRead, "bytes read") + assert.Equal(t, b.BytesUploaded, s.BytesUploaded, "bytes uploaded") + assert.Equal(t, b.CompletedAt, s.EndedAt, "completion time") + assert.Equal(t, b.ErrorCount, s.ErrorCount, "error count") + assert.Equal(t, b.ItemsRead, s.ItemsRead, "items read") + assert.Equal(t, b.TotalSkippedItems, s.ItemsSkipped, "items skipped") + assert.Equal(t, b.ItemsWritten, s.ItemsWritten, "items written") + assert.Equal(t, b.StartedAt, s.StartedAt, "started at") +} + +func (suite *BackupUnitSuite) 
TestStats_headersValues() { + var ( + t = suite.T() + start = time.Now() + b = stubBackup(start, "owner", "ownername") + s = b.ToPrintable().Stats + ) + + expectHeaders := []string{ + "ID", + "Bytes Uploaded", + "Items Uploaded", + "Items Skipped", + "Errors", + } + + assert.Equal(t, expectHeaders, s.Headers()) + + expectValues := []string{ + "id", + humanize.Bytes(uint64(b.BytesUploaded)), + strconv.Itoa(b.ItemsWritten), + strconv.Itoa(b.TotalSkippedItems), + strconv.Itoa(b.ErrorCount), + } + + assert.Equal(t, expectValues, s.Values()) +} diff --git a/src/pkg/backup/details/details.go b/src/pkg/backup/details/details.go index 32074c9c6..14a92eb99 100644 --- a/src/pkg/backup/details/details.go +++ b/src/pkg/backup/details/details.go @@ -14,7 +14,7 @@ import ( "golang.org/x/exp/maps" "github.com/alcionai/corso/src/cli/print" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/path" @@ -133,31 +133,42 @@ func NewSharePointLocationIDer( // DetailsModel describes what was stored in a Backup type DetailsModel struct { - Entries []DetailsEntry `json:"entries"` + Entries []Entry `json:"entries"` } // Print writes the DetailModel Entries to StdOut, in the format // requested by the caller. func (dm DetailsModel) PrintEntries(ctx context.Context) { - if print.JSONFormat() { - printJSON(ctx, dm) + printEntries(ctx, dm.Entries) +} + +type infoer interface { + Entry | *Entry + // Need this here so we can access the infoType function without a type + // assertion. See https://stackoverflow.com/a/71378366 for more details. + infoType() ItemType +} + +func printEntries[T infoer](ctx context.Context, entries []T) { + if print.DisplayJSONFormat() { + printJSON(ctx, entries) } else { - printTable(ctx, dm) + printTable(ctx, entries) } } -func printTable(ctx context.Context, dm DetailsModel) { +func printTable[T infoer](ctx context.Context, entries []T) { perType := map[ItemType][]print.Printable{} - for _, de := range dm.Entries { - it := de.infoType() + for _, ent := range entries { + it := ent.infoType() ps, ok := perType[it] if !ok { ps = []print.Printable{} } - perType[it] = append(ps, print.Printable(de)) + perType[it] = append(ps, print.Printable(ent)) } for _, ps := range perType { @@ -165,10 +176,10 @@ func printTable(ctx context.Context, dm DetailsModel) { } } -func printJSON(ctx context.Context, dm DetailsModel) { +func printJSON[T infoer](ctx context.Context, entries []T) { ents := []print.Printable{} - for _, ent := range dm.Entries { + for _, ent := range entries { ents = append(ents, print.Printable(ent)) } @@ -194,8 +205,8 @@ func (dm DetailsModel) Paths() []string { // Items returns a slice of *ItemInfo that does not contain any FolderInfo // entries. Required because not all folders in the details are valid resource // paths, and we want to slice out metadata. -func (dm DetailsModel) Items() []*DetailsEntry { - res := make([]*DetailsEntry, 0, len(dm.Entries)) +func (dm DetailsModel) Items() entrySet { + res := make([]*Entry, 0, len(dm.Entries)) for i := 0; i < len(dm.Entries); i++ { ent := dm.Entries[i] @@ -213,7 +224,7 @@ func (dm DetailsModel) Items() []*DetailsEntry { // .meta files removed from the entries. 
func (dm DetailsModel) FilterMetaFiles() DetailsModel { d2 := DetailsModel{ - Entries: []DetailsEntry{}, + Entries: []Entry{}, } for _, ent := range dm.Entries { @@ -226,11 +237,11 @@ func (dm DetailsModel) FilterMetaFiles() DetailsModel { } // Check if a file is a metadata file. These are used to store -// additional data like permissions in case of OneDrive and are not to -// be treated as regular files. -func (de DetailsEntry) isMetaFile() bool { - // TODO: Add meta file filtering to SharePoint as well once we add - // meta files for SharePoint. +// additional data like permissions (in case of Drive items) and are +// not to be treated as regular files. +func (de Entry) isMetaFile() bool { + // sharepoint types not needed, since sharepoint permissions were + // added after IsMeta was deprecated. return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta } @@ -241,8 +252,8 @@ func (de DetailsEntry) isMetaFile() bool { // Builder should be used to create a details model. type Builder struct { d Details - mu sync.Mutex `json:"-"` - knownFolders map[string]DetailsEntry `json:"-"` + mu sync.Mutex `json:"-"` + knownFolders map[string]Entry `json:"-"` } func (b *Builder) Add( @@ -276,7 +287,7 @@ func (b *Builder) Add( func (b *Builder) addFolderEntries( repoRef, locationRef *path.Builder, - entry DetailsEntry, + entry Entry, ) error { if len(repoRef.Elements()) < len(locationRef.Elements()) { return clues.New("RepoRef shorter than LocationRef"). @@ -284,7 +295,7 @@ func (b *Builder) addFolderEntries( } if b.knownFolders == nil { - b.knownFolders = map[string]DetailsEntry{} + b.knownFolders = map[string]Entry{} } // Need a unique location because we want to have separate folders for @@ -317,7 +328,7 @@ func (b *Builder) addFolderEntries( if !ok { loc := uniqueLoc.InDetails().String() - folder = DetailsEntry{ + folder = Entry{ RepoRef: rr, ShortRef: shortRef, ParentRef: parentRef, @@ -380,12 +391,12 @@ func (d *Details) add( locationRef *path.Builder, updated bool, info ItemInfo, -) (DetailsEntry, error) { +) (Entry, error) { if locationRef == nil { - return DetailsEntry{}, clues.New("nil LocationRef").With("repo_ref", repoRef) + return Entry{}, clues.New("nil LocationRef").With("repo_ref", repoRef) } - entry := DetailsEntry{ + entry := Entry{ RepoRef: repoRef.String(), ShortRef: repoRef.ShortRef(), ParentRef: repoRef.ToBuilder().Dir().ShortRef(), @@ -457,8 +468,15 @@ func withoutMetadataSuffix(id string) string { // Entry // -------------------------------------------------------------------------------- -// DetailsEntry describes a single item stored in a Backup -type DetailsEntry struct { +// Add a new type so we can transparently use PrintAll in different situations. +type entrySet []*Entry + +func (ents entrySet) PrintEntries(ctx context.Context) { + printEntries(ctx, ents) +} + +// Entry describes a single item stored in a Backup +type Entry struct { // RepoRef is the full storage path of the item in Kopia RepoRef string `json:"repoRef"` ShortRef string `json:"shortRef"` @@ -490,7 +508,7 @@ type DetailsEntry struct { // ToLocationIDer takes a backup version and produces the unique location for // this entry if possible. Reasons it may not be possible to produce the unique // location include an unsupported backup version or missing information. 
-func (de DetailsEntry) ToLocationIDer(backupVersion int) (LocationIDer, error) { +func (de Entry) ToLocationIDer(backupVersion int) (LocationIDer, error) { if len(de.LocationRef) > 0 { baseLoc, err := path.Builder{}.SplitUnescapeAppend(de.LocationRef) if err != nil { @@ -522,9 +540,9 @@ func (de DetailsEntry) ToLocationIDer(backupVersion int) (LocationIDer, error) { return nil, clues.Wrap(err, "getting item RepoRef") } - p, err := path.ToOneDrivePath(rr) + p, err := path.ToDrivePath(rr) if err != nil { - return nil, clues.New("converting RepoRef to OneDrive path") + return nil, clues.New("converting RepoRef to drive path") } baseLoc := path.Builder{}.Append(p.Root).Append(p.Folders...) @@ -538,17 +556,17 @@ func (de DetailsEntry) ToLocationIDer(backupVersion int) (LocationIDer, error) { // -------------------------------------------------------------------------------- // interface compliance checks -var _ print.Printable = &DetailsEntry{} +var _ print.Printable = &Entry{} // MinimumPrintable DetailsEntries is a passthrough func, because no // reduction is needed for the json output. -func (de DetailsEntry) MinimumPrintable() any { +func (de Entry) MinimumPrintable() any { return de } // Headers returns the human-readable names of properties in a DetailsEntry // for printing out to a terminal in a columnar display. -func (de DetailsEntry) Headers() []string { +func (de Entry) Headers() []string { hs := []string{"ID"} if de.ItemInfo.Folder != nil { @@ -571,7 +589,7 @@ func (de DetailsEntry) Headers() []string { } // Values returns the values matching the Headers list. -func (de DetailsEntry) Values() []string { +func (de Entry) Values() []string { vs := []string{de.ShortRef} if de.ItemInfo.Folder != nil { @@ -626,20 +644,21 @@ const ( func UpdateItem(item *ItemInfo, newLocPath *path.Builder) { // Only OneDrive and SharePoint have information about parent folders // contained in them. - var updatePath func(newLocPath *path.Builder) + // Can't switch based on infoType because that's been unstable. + if item.Exchange != nil { + item.Exchange.UpdateParentPath(newLocPath) + } else if item.SharePoint != nil { + // SharePoint used to store library items with the OneDriveItem ItemType. + // Start switching them over as we see them since there's no point in + // keeping the old format. 
+ if item.SharePoint.ItemType == OneDriveItem { + item.SharePoint.ItemType = SharePointLibrary + } - switch item.infoType() { - case ExchangeContact, ExchangeEvent, ExchangeMail: - updatePath = item.Exchange.UpdateParentPath - case SharePointLibrary: - updatePath = item.SharePoint.UpdateParentPath - case OneDriveItem: - updatePath = item.OneDrive.UpdateParentPath - default: - return + item.SharePoint.UpdateParentPath(newLocPath) + } else if item.OneDrive != nil { + item.OneDrive.UpdateParentPath(newLocPath) } - - updatePath(newLocPath) } // ItemInfo is a oneOf that contains service specific @@ -804,8 +823,8 @@ func (i ExchangeInfo) Values() []string { return []string{ i.Organizer, i.Subject, - common.FormatTabularDisplayTime(i.EventStart), - common.FormatTabularDisplayTime(i.EventEnd), + dttm.FormatToTabularDisplay(i.EventStart), + dttm.FormatToTabularDisplay(i.EventEnd), strconv.FormatBool(i.EventRecurs), } @@ -815,7 +834,7 @@ func (i ExchangeInfo) Values() []string { case ExchangeMail: return []string{ i.Sender, i.ParentPath, i.Subject, - common.FormatTabularDisplayTime(i.Received), + dttm.FormatToTabularDisplay(i.Received), } } @@ -865,7 +884,7 @@ type SharePointInfo struct { DriveID string `json:"driveID,omitempty"` ItemName string `json:"itemName,omitempty"` ItemType ItemType `json:"itemType,omitempty"` - Modified time.Time `josn:"modified,omitempty"` + Modified time.Time `json:"modified,omitempty"` Owner string `json:"owner,omitempty"` ParentPath string `json:"parentPath,omitempty"` Size int64 `json:"size,omitempty"` @@ -887,8 +906,8 @@ func (i SharePointInfo) Values() []string { i.ParentPath, humanize.Bytes(uint64(i.Size)), i.Owner, - common.FormatTabularDisplayTime(i.Created), - common.FormatTabularDisplayTime(i.Modified), + dttm.FormatToTabularDisplay(i.Created), + dttm.FormatToTabularDisplay(i.Modified), } } @@ -944,8 +963,8 @@ func (i OneDriveInfo) Values() []string { i.ParentPath, humanize.Bytes(uint64(i.Size)), i.Owner, - common.FormatTabularDisplayTime(i.Created), - common.FormatTabularDisplayTime(i.Modified), + dttm.FormatToTabularDisplay(i.Created), + dttm.FormatToTabularDisplay(i.Modified), } } diff --git a/src/pkg/backup/details/details_test.go b/src/pkg/backup/details/details_test.go index eba378f1b..d6aae6bbc 100644 --- a/src/pkg/backup/details/details_test.go +++ b/src/pkg/backup/details/details_test.go @@ -13,7 +13,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/version" @@ -34,19 +35,19 @@ func TestDetailsUnitSuite(t *testing.T) { func (suite *DetailsUnitSuite) TestDetailsEntry_HeadersValues() { initial := time.Now() - nowStr := common.FormatTimeWith(initial, common.TabularOutput) - now, err := common.ParseTime(nowStr) + nowStr := dttm.FormatTo(initial, dttm.TabularOutput) + now, err := dttm.ParseTime(nowStr) require.NoError(suite.T(), err, clues.ToCore(err)) table := []struct { name string - entry DetailsEntry + entry Entry expectHs []string expectVs []string }{ { name: "no info", - entry: DetailsEntry{ + entry: Entry{ RepoRef: "reporef", ShortRef: "deadbeef", LocationRef: "locationref", @@ -57,7 +58,7 @@ func (suite *DetailsUnitSuite) TestDetailsEntry_HeadersValues() { }, { name: "exchange 
event info", - entry: DetailsEntry{ + entry: Entry{ RepoRef: "reporef", ShortRef: "deadbeef", LocationRef: "locationref", @@ -78,7 +79,7 @@ func (suite *DetailsUnitSuite) TestDetailsEntry_HeadersValues() { }, { name: "exchange contact info", - entry: DetailsEntry{ + entry: Entry{ RepoRef: "reporef", ShortRef: "deadbeef", LocationRef: "locationref", @@ -95,7 +96,7 @@ func (suite *DetailsUnitSuite) TestDetailsEntry_HeadersValues() { }, { name: "exchange mail info", - entry: DetailsEntry{ + entry: Entry{ RepoRef: "reporef", ShortRef: "deadbeef", LocationRef: "locationref", @@ -116,7 +117,7 @@ func (suite *DetailsUnitSuite) TestDetailsEntry_HeadersValues() { }, { name: "sharepoint info", - entry: DetailsEntry{ + entry: Entry{ RepoRef: "reporef", ShortRef: "deadbeef", LocationRef: "locationref", @@ -148,7 +149,7 @@ func (suite *DetailsUnitSuite) TestDetailsEntry_HeadersValues() { }, { name: "oneDrive info", - entry: DetailsEntry{ + entry: Entry{ RepoRef: "reporef", ShortRef: "deadbeef", LocationRef: "locationref", @@ -181,7 +182,7 @@ func (suite *DetailsUnitSuite) TestDetailsEntry_HeadersValues() { } } -func exchangeEntry(t *testing.T, id string, size int, it ItemType) DetailsEntry { +func exchangeEntry(t *testing.T, id string, size int, it ItemType) Entry { rr := makeItemPath( t, path.ExchangeService, @@ -190,7 +191,7 @@ func exchangeEntry(t *testing.T, id string, size int, it ItemType) DetailsEntry "user-id", []string{"Inbox", "folder1", id}) - return DetailsEntry{ + return Entry{ RepoRef: rr.String(), ShortRef: rr.ShortRef(), ParentRef: rr.ToBuilder().Dir().ShortRef(), @@ -206,7 +207,7 @@ func exchangeEntry(t *testing.T, id string, size int, it ItemType) DetailsEntry } } -func oneDriveishEntry(t *testing.T, id string, size int, it ItemType) DetailsEntry { +func oneDriveishEntry(t *testing.T, id string, size int, it ItemType) Entry { service := path.OneDriveService category := path.FilesCategory info := ItemInfo{ @@ -242,9 +243,9 @@ func oneDriveishEntry(t *testing.T, id string, size int, it ItemType) DetailsEnt "tenant-id", "user-id", []string{ - "drives", + odConsts.DrivesPathDir, "drive-id", - "root:", + odConsts.RootPathDir, "Inbox", "folder1", id, @@ -252,7 +253,7 @@ func oneDriveishEntry(t *testing.T, id string, size int, it ItemType) DetailsEnt loc := path.Builder{}.Append(rr.Folders()...).PopFront().PopFront() - return DetailsEntry{ + return Entry{ RepoRef: rr.String(), ShortRef: rr.ShortRef(), ParentRef: rr.ToBuilder().Dir().ShortRef(), @@ -268,7 +269,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_NoLocationFolders() { t := suite.T() table := []struct { name string - entry DetailsEntry + entry Entry // shortRefEqual allows checking that OneDrive and SharePoint have their // ShortRef updated in the returned entry. 
// @@ -293,7 +294,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_NoLocationFolders() { }, { name: "Legacy SharePoint File", - entry: func() DetailsEntry { + entry: func() Entry { res := oneDriveishEntry(t, itemID, 42, SharePointLibrary) res.SharePoint.ItemType = OneDriveItem @@ -360,7 +361,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { exchangeMail2 := exchangeEntry(t, "foo2", 43, ExchangeMail) exchangeContact1 := exchangeEntry(t, "foo3", 44, ExchangeContact) - exchangeFolders := []DetailsEntry{ + exchangeFolders := []Entry{ { ItemInfo: ItemInfo{ Folder: &FolderInfo{ @@ -382,7 +383,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, } - exchangeContactFolders := []DetailsEntry{ + exchangeContactFolders := []Entry{ { ItemInfo: ItemInfo{ Folder: &FolderInfo{ @@ -404,11 +405,11 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, } - oneDriveishFolders := []DetailsEntry{ + oneDriveishFolders := []Entry{ { ItemInfo: ItemInfo{ Folder: &FolderInfo{ - DisplayName: "root:", + DisplayName: odConsts.RootPathDir, ItemType: FolderItem, DriveName: "drive-name", DriveID: "drive-id", @@ -416,7 +417,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, }, { - LocationRef: "root:", + LocationRef: odConsts.RootPathDir, ItemInfo: ItemInfo{ Folder: &FolderInfo{ DisplayName: "Inbox", @@ -441,20 +442,20 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { table := []struct { name string - entries func() []DetailsEntry - expectedDirs func() []DetailsEntry + entries func() []Entry + expectedDirs func() []Entry }{ { name: "One Exchange Email None Updated", - entries: func() []DetailsEntry { + entries: func() []Entry { e := exchangeMail1 ei := *exchangeMail1.Exchange e.Exchange = &ei - return []DetailsEntry{e} + return []Entry{e} }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range exchangeFolders { e := entry @@ -472,16 +473,16 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, { name: "One Exchange Email Updated", - entries: func() []DetailsEntry { + entries: func() []Entry { e := exchangeMail1 ei := *exchangeMail1.Exchange e.Exchange = &ei e.Updated = true - return []DetailsEntry{e} + return []Entry{e} }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range exchangeFolders { e := entry @@ -500,10 +501,10 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, { name: "Two Exchange Emails None Updated", - entries: func() []DetailsEntry { - res := []DetailsEntry{} + entries: func() []Entry { + res := []Entry{} - for _, entry := range []DetailsEntry{exchangeMail1, exchangeMail2} { + for _, entry := range []Entry{exchangeMail1, exchangeMail2} { e := entry ei := *entry.Exchange e.Exchange = &ei @@ -513,8 +514,8 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { return res }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range exchangeFolders { e := entry @@ -532,10 +533,10 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, { name: "Two Exchange Emails One Updated", - entries: func() []DetailsEntry { - res := []DetailsEntry{} + entries: func() []Entry { + res := []Entry{} - for i, entry := range []DetailsEntry{exchangeMail1, exchangeMail2} { + for i, entry := range 
[]Entry{exchangeMail1, exchangeMail2} { e := entry ei := *entry.Exchange e.Exchange = &ei @@ -546,8 +547,8 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { return res }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range exchangeFolders { e := entry @@ -566,10 +567,10 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, { name: "One Email And One Contact None Updated", - entries: func() []DetailsEntry { - res := []DetailsEntry{} + entries: func() []Entry { + res := []Entry{} - for _, entry := range []DetailsEntry{exchangeMail1, exchangeContact1} { + for _, entry := range []Entry{exchangeMail1, exchangeContact1} { e := entry ei := *entry.Exchange e.Exchange = &ei @@ -579,8 +580,8 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { return res }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range exchangeFolders { e := entry @@ -609,15 +610,15 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, { name: "One OneDrive Item None Updated", - entries: func() []DetailsEntry { + entries: func() []Entry { e := oneDrive1 ei := *oneDrive1.OneDrive e.OneDrive = &ei - return []DetailsEntry{e} + return []Entry{e} }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range oneDriveishFolders { e := entry @@ -636,15 +637,15 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, { name: "One SharePoint Item None Updated", - entries: func() []DetailsEntry { + entries: func() []Entry { e := sharePoint1 ei := *sharePoint1.SharePoint e.SharePoint = &ei - return []DetailsEntry{e} + return []Entry{e} }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range oneDriveishFolders { e := entry @@ -663,15 +664,15 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { }, { name: "One SharePoint Legacy Item None Updated", - entries: func() []DetailsEntry { + entries: func() []Entry { e := sharePoint1 ei := *sharePoint1.SharePoint e.SharePoint = &ei - return []DetailsEntry{e} + return []Entry{e} }, - expectedDirs: func() []DetailsEntry { - res := []DetailsEntry{} + expectedDirs: func() []Entry { + res := []Entry{} for _, entry := range oneDriveishFolders { e := entry @@ -707,7 +708,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { } deets := db.Details() - gotDirs := []DetailsEntry{} + gotDirs := []Entry{} for _, entry := range deets.Entries { // Other test checks items are populated properly. 
@@ -730,7 +731,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() { var pathItemsTable = []struct { name string - ents []DetailsEntry + ents []Entry expectRepoRefs []string expectLocationRefs []string }{ @@ -742,7 +743,7 @@ var pathItemsTable = []struct { }, { name: "single entry", - ents: []DetailsEntry{ + ents: []Entry{ { RepoRef: "abcde", LocationRef: "locationref", @@ -754,7 +755,7 @@ var pathItemsTable = []struct { }, { name: "multiple entries", - ents: []DetailsEntry{ + ents: []Entry{ { RepoRef: "abcde", LocationRef: "locationref", @@ -771,7 +772,7 @@ var pathItemsTable = []struct { }, { name: "multiple entries with folder", - ents: []DetailsEntry{ + ents: []Entry{ { RepoRef: "abcde", LocationRef: "locationref", @@ -797,7 +798,7 @@ var pathItemsTable = []struct { }, { name: "multiple entries with meta file", - ents: []DetailsEntry{ + ents: []Entry{ { RepoRef: "abcde", LocationRef: "locationref", @@ -824,7 +825,7 @@ var pathItemsTable = []struct { }, { name: "multiple entries with folder and meta file", - ents: []DetailsEntry{ + ents: []Entry{ { RepoRef: "abcde", LocationRef: "locationref", @@ -908,7 +909,7 @@ func (suite *DetailsUnitSuite) TestDetailsModel_FilterMetaFiles() { t := suite.T() d := &DetailsModel{ - Entries: []DetailsEntry{ + Entries: []Entry{ { RepoRef: "a.data", ItemInfo: ItemInfo{ @@ -958,7 +959,7 @@ func (suite *DetailsUnitSuite) TestBuilder_Add_shortRefsUniqueFromFolder() { "a-user", []string{ "drive-id", - "root:", + odConsts.RootPathDir, "folder", name + "-id", }) @@ -971,7 +972,7 @@ func (suite *DetailsUnitSuite) TestBuilder_Add_shortRefsUniqueFromFolder() { "a-user", []string{ "drive-id", - "root:", + odConsts.RootPathDir, "folder", name + "-id", name, @@ -1060,7 +1061,7 @@ func (suite *DetailsUnitSuite) TestUpdateItem() { ) newExchangePB := path.Builder{}.Append(folder2) - newOneDrivePB := path.Builder{}.Append("root:", folder2) + newOneDrivePB := path.Builder{}.Append(odConsts.RootPathDir, folder2) table := []struct { name string @@ -1148,6 +1149,28 @@ func (suite *DetailsUnitSuite) TestUpdateItem() { }, }, }, + { + name: "SharePoint Old Format", + input: ItemInfo{ + SharePoint: &SharePointInfo{ + ItemType: OneDriveItem, + ParentPath: folder1, + }, + }, + locPath: newOneDrivePB, + expectedItem: ItemInfo{ + SharePoint: &SharePointInfo{ + ItemType: SharePointLibrary, + ParentPath: folder2, + }, + }, + }, + { + name: "Empty Item Doesn't Fail", + input: ItemInfo{}, + locPath: newOneDrivePB, + expectedItem: ItemInfo{}, + }, } for _, test := range table { @@ -1360,7 +1383,7 @@ func (suite *DetailsUnitSuite) TestLocationIDer_FromEntry() { suite.Run(test.name, func() { t := suite.T() - entry := DetailsEntry{ + entry := Entry{ RepoRef: fmt.Sprintf(rrString, test.service, test.category), ItemInfo: test.itemInfo, } diff --git a/src/pkg/backup/details/mock/location_ider.go b/src/pkg/backup/details/mock/location_ider.go new file mode 100644 index 000000000..046c9e146 --- /dev/null +++ b/src/pkg/backup/details/mock/location_ider.go @@ -0,0 +1,16 @@ +package mock + +import "github.com/alcionai/corso/src/pkg/path" + +type LocationIDer struct { + Unique *path.Builder + Details *path.Builder +} + +func (li LocationIDer) ID() *path.Builder { + return li.Unique +} + +func (li LocationIDer) InDetails() *path.Builder { + return li.Details +} diff --git a/src/pkg/backup/details/testdata/in_deets.go b/src/pkg/backup/details/testdata/in_deets.go new file mode 100644 index 000000000..b15c50f17 --- /dev/null +++ b/src/pkg/backup/details/testdata/in_deets.go @@ -0,0 
+1,368 @@ +package testdata + +import ( + "context" + "strings" + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/internal/streamstore" + "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" +) + +// --------------------------------------------------------------------------- +// location set handling +// --------------------------------------------------------------------------- + +var exists = struct{}{} + +type locSet struct { + // map [locationRef] map [itemRef] {} + // refs may be either the canonical ent refs, or something else, + // so long as they are consistent for the test in question + Locations map[string]map[string]struct{} + Deleted map[string]map[string]struct{} +} + +func newLocSet() *locSet { + return &locSet{ + Locations: map[string]map[string]struct{}{}, + Deleted: map[string]map[string]struct{}{}, + } +} + +func (ls *locSet) AddItem(locationRef, itemRef string) { + ls.AddLocation(locationRef) + + ls.Locations[locationRef][itemRef] = exists + delete(ls.Deleted[locationRef], itemRef) +} + +func (ls *locSet) RemoveItem(locationRef, itemRef string) { + delete(ls.Locations[locationRef], itemRef) + + if _, ok := ls.Deleted[locationRef]; !ok { + ls.Deleted[locationRef] = map[string]struct{}{} + } + + ls.Deleted[locationRef][itemRef] = exists +} + +func (ls *locSet) MoveItem(fromLocation, toLocation, ir string) { + ls.RemoveItem(fromLocation, ir) + ls.AddItem(toLocation, ir) +} + +func (ls *locSet) AddLocation(locationRef string) { + if _, ok := ls.Locations[locationRef]; !ok { + ls.Locations[locationRef] = map[string]struct{}{} + } + // don't purge previously deleted items, or child locations. + // Assumption is that their itemRef is unique, and still deleted. + delete(ls.Deleted, locationRef) +} + +func (ls *locSet) RemoveLocation(locationRef string) { + ss := ls.Subset(locationRef) + + for lr := range ss.Locations { + items := ls.Locations[lr] + + delete(ls.Locations, lr) + + if _, ok := ls.Deleted[lr]; !ok { + ls.Deleted[lr] = map[string]struct{}{} + } + + for ir := range items { + ls.Deleted[lr][ir] = exists + } + } +} + +// MoveLocation takes the LAST elemet in the fromLocation (and all) +// children matching the prefix, and relocates it as a child of toLocation. +// ex: MoveLocation("/a/b/c", "/d") will move all entries with the prefix +// "/a/b/c" into "/d/c". This also deletes all "/a/b/c" entries and children. +// assumes item IDs don't change across the migration. If item IDs do change, +// that difference will need to be handled manually by the caller. +// returns the base folder's new location (ex: /d/c) +func (ls *locSet) MoveLocation(fromLocation, toLocation string) string { + fromBuilder := path.Builder{}.Append(path.Split(fromLocation)...) + toBuilder := path.Builder{}.Append(path.Split(toLocation)...).Append(fromBuilder.LastElem()) + + ls.RenameLocation(fromBuilder.String(), toBuilder.String()) + + return toBuilder.String() +} + +func (ls *locSet) RenameLocation(fromLocation, toLocation string) { + ss := ls.Subset(fromLocation) + fromBuilder := path.Builder{}.Append(path.Split(fromLocation)...) + toBuilder := path.Builder{}.Append(path.Split(toLocation)...) 
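+	// fromBuilder/toBuilder normalize the raw location strings into path
+	// elements so the loop below can swap the old prefix for the new one.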
+ + for lr, items := range ss.Locations { + lrBuilder := path.Builder{}.Append(path.Split(lr)...) + lrBuilder.UpdateParent(fromBuilder, toBuilder) + + newLoc := lrBuilder.String() + + for ir := range items { + ls.RemoveItem(lr, ir) + ls.AddItem(newLoc, ir) + } + + ls.RemoveLocation(lr) + ls.AddLocation(newLoc) + } +} + +// Subset produces a new locSet containing only Items and Locations +// whose location matches the locationPfx +func (ls *locSet) Subset(locationPfx string) *locSet { + ss := newLocSet() + + for lr, items := range ls.Locations { + if strings.HasPrefix(lr, locationPfx) { + ss.AddLocation(lr) + + for ir := range items { + ss.AddItem(lr, ir) + } + } + } + + return ss +} + +// --------------------------------------------------------------------------- +// The goal of InDeets is to provide a struct and interface which allows +// tests to predict not just the elements within a set of details entries, +// but also their changes (relocation, renaming, etc) in a way that consolidates +// building an "expected set" of details entries that can be compared against +// the details results after a backup. +// --------------------------------------------------------------------------- + +// InDeets is a helper for comparing details state in tests +// across backup instances. +type InDeets struct { + // only: tenantID/service/resourceOwnerID + RRPrefix string + // map of container setting the uniqueness boundary for location + // ref entries (eg, data type like email, contacts, etc, or + // drive id) to the unique entries in that set. + Sets map[string]*locSet +} + +func NewInDeets(repoRefPrefix string) *InDeets { + return &InDeets{ + RRPrefix: repoRefPrefix, + Sets: map[string]*locSet{}, + } +} + +func (id *InDeets) getSet(set string) *locSet { + s, ok := id.Sets[set] + if ok { + return s + } + + return newLocSet() +} + +func (id *InDeets) AddAll(deets details.Details, ws whatSet) { + if id.Sets == nil { + id.Sets = map[string]*locSet{} + } + + for _, ent := range deets.Entries { + set, err := ws(ent) + if err != nil { + set = err.Error() + } + + dir := ent.LocationRef + + if ent.Folder != nil { + dir = dir + ent.Folder.DisplayName + id.AddLocation(set, dir) + } else { + id.AddItem(set, ent.LocationRef, ent.ItemRef) + } + } +} + +func (id *InDeets) AddItem(set, locationRef, itemRef string) { + id.getSet(set).AddItem(locationRef, itemRef) +} + +func (id *InDeets) RemoveItem(set, locationRef, itemRef string) { + id.getSet(set).RemoveItem(locationRef, itemRef) +} + +func (id *InDeets) MoveItem(set, fromLocation, toLocation, ir string) { + id.getSet(set).MoveItem(fromLocation, toLocation, ir) +} + +func (id *InDeets) AddLocation(set, locationRef string) { + id.getSet(set).AddLocation(locationRef) +} + +// RemoveLocation removes the provided location, and all children +// of that location. +func (id *InDeets) RemoveLocation(set, locationRef string) { + id.getSet(set).RemoveLocation(locationRef) +} + +// MoveLocation takes the LAST elemet in the fromLocation (and all) +// children matching the prefix, and relocates it as a child of toLocation. +// ex: MoveLocation("/a/b/c", "/d") will move all entries with the prefix +// "/a/b/c" into "/d/c". This also deletes all "/a/b/c" entries and children. +// assumes item IDs don't change across the migration. If item IDs do change, +// that difference will need to be handled manually by the caller. 
+// returns the base folder's new location (ex: /d/c) +func (id *InDeets) MoveLocation(set, fromLocation, toLocation string) string { + return id.getSet(set).MoveLocation(fromLocation, toLocation) +} + +func (id *InDeets) RenameLocation(set, fromLocation, toLocation string) { + id.getSet(set).RenameLocation(fromLocation, toLocation) +} + +// Subset produces a new locSet containing only Items and Locations +// whose location matches the locationPfx +func (id *InDeets) Subset(set, locationPfx string) *locSet { + return id.getSet(set).Subset(locationPfx) +} + +// --------------------------------------------------------------------------- +// whatSet helpers for extracting a set identifier from an arbitrary repoRef +// --------------------------------------------------------------------------- + +type whatSet func(details.Entry) (string, error) + +// common whatSet parser that extracts the service category from +// a repoRef. +func CategoryFromRepoRef(ent details.Entry) (string, error) { + p, err := path.FromDataLayerPath(ent.RepoRef, false) + if err != nil { + return "", err + } + + return p.Category().String(), nil +} + +// common whatSet parser that extracts the driveID from a repoRef. +func DriveIDFromRepoRef(ent details.Entry) (string, error) { + p, err := path.FromDataLayerPath(ent.RepoRef, false) + if err != nil { + return "", err + } + + odp, err := path.ToDrivePath(p) + if err != nil { + return "", err + } + + return odp.DriveID, nil +} + +// --------------------------------------------------------------------------- +// helpers and comparators +// --------------------------------------------------------------------------- + +func CheckBackupDetails( + t *testing.T, + ctx context.Context, //revive:disable-line:context-as-argument + backupID model.StableID, + ws whatSet, + ms *kopia.ModelStore, + ssr streamstore.Reader, + expect *InDeets, + // standard check is assert.Subset due to issues of external data cross- + // pollination. This should be true if the backup contains a unique directory + // of data. 
+ mustEqualFolders bool, +) { + deets, result := GetDeetsInBackup(t, ctx, backupID, "", "", path.UnknownService, ws, ms, ssr) + + t.Log("details entries in result") + + for _, ent := range deets.Entries { + if ent.Folder == nil { + t.Log(ent.LocationRef) + t.Log(ent.ItemRef) + } + + assert.Truef( + t, + strings.HasPrefix(ent.RepoRef, expect.RRPrefix), + "all details should begin with the expected prefix\nwant: %s\ngot: %s", + expect.RRPrefix, ent.RepoRef) + } + + for set := range expect.Sets { + check := assert.Subsetf + + if mustEqualFolders { + check = assert.ElementsMatchf + } + + check( + t, + maps.Keys(result.Sets[set].Locations), + maps.Keys(expect.Sets[set].Locations), + "results in %s missing expected location", set) + + for lr, items := range expect.Sets[set].Deleted { + _, ok := result.Sets[set].Locations[lr] + assert.Falsef(t, ok, "deleted location in %s found in result: %s", set, lr) + + for ir := range items { + _, ok := result.Sets[set].Locations[lr][ir] + assert.Falsef(t, ok, "deleted item in %s found in result: %s", set, lr) + } + } + } +} + +func GetDeetsInBackup( + t *testing.T, + ctx context.Context, //revive:disable-line:context-as-argument + backupID model.StableID, + tid, resourceOwner string, + service path.ServiceType, + ws whatSet, + ms *kopia.ModelStore, + ssr streamstore.Reader, +) (details.Details, *InDeets) { + bup := backup.Backup{} + + err := ms.Get(ctx, model.BackupSchema, backupID, &bup) + require.NoError(t, err, clues.ToCore(err)) + + ssid := bup.StreamStoreID + require.NotEmpty(t, ssid, "stream store ID") + + var deets details.Details + err = ssr.Read( + ctx, + ssid, + streamstore.DetailsReader(details.UnmarshalTo(&deets)), + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + + id := NewInDeets(path.Builder{}.Append(tid, service.String(), resourceOwner).String()) + id.AddAll(deets, ws) + + return deets, id +} diff --git a/src/pkg/backup/details/testdata/in_deets_test.go b/src/pkg/backup/details/testdata/in_deets_test.go new file mode 100644 index 000000000..81beb0b0f --- /dev/null +++ b/src/pkg/backup/details/testdata/in_deets_test.go @@ -0,0 +1,445 @@ +package testdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "golang.org/x/exp/maps" + + "github.com/alcionai/corso/src/internal/tester" +) + +type LocSetUnitSuite struct { + tester.Suite +} + +func TestLocSetUnitSuite(t *testing.T) { + suite.Run(t, &LocSetUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +const ( + l1 = "lr_1" + l2 = "lr_2" + l13 = "lr_1/lr_3" + l14 = "lr_1/lr_4" + i1 = "ir_1" + i2 = "ir_2" + i3 = "ir_3" + i4 = "ir_4" +) + +func (suite *LocSetUnitSuite) TestAdd() { + t := suite.T() + + ls := newLocSet() + + ls.AddItem(l1, i1) + ls.AddLocation(l2) + + assert.ElementsMatch(t, []string{l1, l2}, maps.Keys(ls.Locations)) + assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1])) + assert.Empty(t, maps.Keys(ls.Locations[l2])) + assert.Empty(t, maps.Keys(ls.Locations[l13])) +} + +func (suite *LocSetUnitSuite) TestRemove() { + t := suite.T() + + ls := newLocSet() + + ls.AddItem(l1, i1) + ls.AddItem(l1, i2) + ls.AddLocation(l13) + ls.AddItem(l14, i3) + ls.AddItem(l14, i4) + + assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations)) + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1])) + assert.Empty(t, maps.Keys(ls.Locations[l13])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[l14])) + + // nop removal + ls.RemoveItem(l2, i1) + assert.ElementsMatch(t, 
[]string{i1, i2}, maps.Keys(ls.Locations[l1])) + + // item removal + ls.RemoveItem(l1, i2) + assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1])) + + // nop location removal + ls.RemoveLocation(l2) + assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations)) + + // non-cascading location removal + ls.RemoveLocation(l13) + assert.ElementsMatch(t, []string{l1, l14}, maps.Keys(ls.Locations)) + assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[l14])) + + // cascading location removal + ls.RemoveLocation(l1) + assert.Empty(t, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l1])) + assert.Empty(t, maps.Keys(ls.Locations[l13])) + assert.Empty(t, maps.Keys(ls.Locations[l14])) +} + +func (suite *LocSetUnitSuite) TestSubset() { + ls := newLocSet() + + ls.AddItem(l1, i1) + ls.AddItem(l1, i2) + ls.AddLocation(l13) + ls.AddItem(l14, i3) + ls.AddItem(l14, i4) + + table := []struct { + name string + locPfx string + expect func(*testing.T, *locSet) + }{ + { + name: "nop", + locPfx: l2, + expect: func(t *testing.T, ss *locSet) { + assert.Empty(t, maps.Keys(ss.Locations)) + }, + }, + { + name: "no items", + locPfx: l13, + expect: func(t *testing.T, ss *locSet) { + assert.ElementsMatch(t, []string{l13}, maps.Keys(ss.Locations)) + assert.Empty(t, maps.Keys(ss.Locations[l13])) + }, + }, + { + name: "non-cascading", + locPfx: l14, + expect: func(t *testing.T, ss *locSet) { + assert.ElementsMatch(t, []string{l14}, maps.Keys(ss.Locations)) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ss.Locations[l14])) + }, + }, + { + name: "cascading", + locPfx: l1, + expect: func(t *testing.T, ss *locSet) { + assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ss.Locations)) + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ss.Locations[l1])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ss.Locations[l14])) + assert.Empty(t, maps.Keys(ss.Locations[l13])) + }, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + test.expect(t, ls.Subset(test.locPfx)) + }) + } +} + +func (suite *LocSetUnitSuite) TestRename() { + t := suite.T() + + makeSet := func() *locSet { + ls := newLocSet() + + ls.AddItem(l1, i1) + ls.AddItem(l1, i2) + ls.AddLocation(l13) + ls.AddItem(l14, i3) + ls.AddItem(l14, i4) + + return ls + } + + ts := makeSet() + assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ts.Locations)) + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1])) + assert.Empty(t, maps.Keys(ts.Locations[l13])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14])) + + table := []struct { + name string + from string + to string + expect func(*testing.T, *locSet) + }{ + { + name: "nop", + from: l2, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l2])) + assert.Empty(t, maps.Keys(ls.Locations["foo"])) + }, + }, + { + name: "no items", + from: l13, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{l1, "foo", l14}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l13])) + assert.Empty(t, maps.Keys(ls.Locations["foo"])) + }, + }, + { + name: "with items", + from: l14, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{l1, l13, "foo"}, maps.Keys(ls.Locations)) + assert.Empty(t, 
maps.Keys(ls.Locations[l14])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations["foo"])) + }, + }, + { + name: "cascading locations", + from: l1, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{"foo", "foo/lr_3", "foo/lr_4"}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l1])) + assert.Empty(t, maps.Keys(ls.Locations[l14])) + assert.Empty(t, maps.Keys(ls.Locations[l13])) + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations["foo"])) + assert.Empty(t, maps.Keys(ls.Locations["foo/lr_3"])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations["foo/lr_4"])) + }, + }, + { + name: "to existing location", + from: l14, + to: l1, + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{l1, l13}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l14])) + assert.ElementsMatch(t, []string{i1, i2, i3, i4}, maps.Keys(ls.Locations[l1])) + }, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + ls := makeSet() + + ls.RenameLocation(test.from, test.to) + test.expect(t, ls) + }) + } +} + +func (suite *LocSetUnitSuite) TestItem() { + t := suite.T() + b4 := "bar/lr_4" + + makeSet := func() *locSet { + ls := newLocSet() + + ls.AddItem(l1, i1) + ls.AddItem(l1, i2) + ls.AddLocation(l13) + ls.AddItem(l14, i3) + ls.AddItem(l14, i4) + ls.AddItem(b4, "fnord") + + return ls + } + + ts := makeSet() + assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ts.Locations)) + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1])) + assert.Empty(t, maps.Keys(ts.Locations[l13])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14])) + assert.ElementsMatch(t, []string{"fnord"}, maps.Keys(ts.Locations[b4])) + + table := []struct { + name string + item string + from string + to string + expect func(*testing.T, *locSet) + }{ + { + name: "nop item", + item: "floob", + from: l2, + to: l1, + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{i1, i2, "floob"}, maps.Keys(ls.Locations[l1])) + assert.Empty(t, maps.Keys(ls.Locations[l2])) + }, + }, + { + name: "nop origin", + item: i1, + from: "smarf", + to: l2, + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1])) + assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l2])) + assert.Empty(t, maps.Keys(ls.Locations["smarf"])) + }, + }, + { + name: "new location", + item: i1, + from: l1, + to: "fnords", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{i2}, maps.Keys(ls.Locations[l1])) + assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations["fnords"])) + }, + }, + { + name: "existing location", + item: i1, + from: l1, + to: l2, + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{i2}, maps.Keys(ls.Locations[l1])) + assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l2])) + }, + }, + { + name: "same location", + item: i1, + from: l1, + to: l1, + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1])) + }, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + ls := makeSet() + + ls.MoveItem(test.from, test.to, test.item) + test.expect(t, ls) + }) + } +} + +func (suite *LocSetUnitSuite) TestMoveLocation() { + t := suite.T() + b4 := "bar/lr_4" + + makeSet := func() *locSet { + ls := newLocSet() + + ls.AddItem(l1, i1) 
+ ls.AddItem(l1, i2) + ls.AddLocation(l13) + ls.AddItem(l14, i3) + ls.AddItem(l14, i4) + ls.AddItem(b4, "fnord") + + return ls + } + + ts := makeSet() + assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ts.Locations)) + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1])) + assert.Empty(t, maps.Keys(ts.Locations[l13])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14])) + assert.ElementsMatch(t, []string{"fnord"}, maps.Keys(ts.Locations[b4])) + + table := []struct { + name string + from string + to string + expect func(*testing.T, *locSet) + expectNewLoc string + }{ + { + name: "nop root", + from: l2, + to: "", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l2])) + }, + expectNewLoc: l2, + }, + { + name: "nop child", + from: l2, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations["foo"])) + assert.Empty(t, maps.Keys(ls.Locations["foo/"+l2])) + }, + expectNewLoc: "foo/" + l2, + }, + { + name: "no items", + from: l13, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + newLoc := "foo/lr_3" + assert.ElementsMatch(t, []string{l1, newLoc, l14, b4}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l13])) + assert.Empty(t, maps.Keys(ls.Locations[newLoc])) + }, + expectNewLoc: "foo/lr_3", + }, + { + name: "with items", + from: l14, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + newLoc := "foo/lr_4" + assert.ElementsMatch(t, []string{l1, l13, newLoc, b4}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l14])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[newLoc])) + }, + expectNewLoc: "foo/lr_4", + }, + { + name: "cascading locations", + from: l1, + to: "foo", + expect: func(t *testing.T, ls *locSet) { + pfx := "foo/" + assert.ElementsMatch(t, []string{pfx + l1, pfx + l13, pfx + l14, b4}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l1])) + assert.Empty(t, maps.Keys(ls.Locations[l14])) + assert.Empty(t, maps.Keys(ls.Locations[l13])) + assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[pfx+l1])) + assert.Empty(t, maps.Keys(ls.Locations[pfx+l13])) + assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[pfx+l14])) + }, + expectNewLoc: "foo/" + l1, + }, + { + name: "to existing location", + from: l14, + to: "bar", + expect: func(t *testing.T, ls *locSet) { + assert.ElementsMatch(t, []string{l1, l13, b4}, maps.Keys(ls.Locations)) + assert.Empty(t, maps.Keys(ls.Locations[l14])) + assert.Empty(t, maps.Keys(ls.Locations["bar"])) + assert.ElementsMatch(t, []string{"fnord", i3, i4}, maps.Keys(ls.Locations[b4])) + }, + expectNewLoc: b4, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + ls := makeSet() + + newLoc := ls.MoveLocation(test.from, test.to) + test.expect(t, ls) + assert.Equal(t, test.expectNewLoc, newLoc) + }) + } +} diff --git a/src/pkg/backup/details/testdata/testdata.go b/src/pkg/backup/details/testdata/testdata.go index d51937abd..0d98ec7df 100644 --- a/src/pkg/backup/details/testdata/testdata.go +++ b/src/pkg/backup/details/testdata/testdata.go @@ -1,7 +1,7 @@ package testdata import ( - stdpath "path" + "strings" "time" "github.com/alcionai/corso/src/pkg/backup/details" @@ -25,7 +25,7 @@ func mustParsePath(ref string, isItem bool) path.Path { // path 
with the element appended to it. Panics if the path cannot be parsed. // Useful for simple variable assignments. func mustAppendPath(p path.Path, newElement string, isItem bool) path.Path { - newP, err := p.Append(newElement, isItem) + newP, err := p.Append(isItem, newElement) if err != nil { panic(err) } @@ -33,7 +33,97 @@ func mustAppendPath(p path.Path, newElement string, isItem bool) path.Path { return newP } +func locFromRepo(rr path.Path, isItem bool) *path.Builder { + loc := &path.Builder{} + + for _, e := range rr.Folders() { + loc = loc.Append(strings.TrimSuffix(e, folderSuffix)) + } + + if rr.Service() == path.OneDriveService || rr.Category() == path.LibrariesCategory { + loc = loc.PopFront() + } + + // Folders don't have their final element in the location. + if !isItem { + loc = loc.Dir() + } + + return loc +} + +type repoRefAndLocRef struct { + RR path.Path + Loc *path.Builder +} + +func (p repoRefAndLocRef) MustAppend(newElement string, isItem bool) repoRefAndLocRef { + e := newElement + folderSuffix + + if isItem { + e = newElement + fileSuffix + } + + res := repoRefAndLocRef{ + RR: mustAppendPath(p.RR, e, isItem), + } + + res.Loc = locFromRepo(res.RR, isItem) + + return res +} + +func (p repoRefAndLocRef) ItemLocation() string { + return strings.TrimSuffix(p.RR.Item(), fileSuffix) +} + +func (p repoRefAndLocRef) FolderLocation() string { + lastElem := p.RR.ToBuilder().LastElem() + + if len(p.RR.Item()) > 0 { + f := p.RR.Folders() + lastElem = f[len(f)-2] + } + + return p.Loc.Append(strings.TrimSuffix(lastElem, folderSuffix)).String() +} + +func mustPathRep(ref string, isItem bool) repoRefAndLocRef { + res := repoRefAndLocRef{} + tmp := mustParsePath(ref, isItem) + + // Now append stuff to the RepoRef elements so we have distinct LocationRef + // and RepoRef elements to simulate using IDs in the path instead of display + // names. 
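+	// e.g. a folder named "Inbox" is stored as "Inbox.d" in the RepoRef,
+	// while locFromRepo strips the suffix so the LocationRef still reads "Inbox".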
+ rrPB := &path.Builder{} + for _, e := range tmp.Folders() { + rrPB = rrPB.Append(e + folderSuffix) + } + + if isItem { + rrPB = rrPB.Append(tmp.Item() + fileSuffix) + } + + rr, err := rrPB.ToDataLayerPath( + tmp.Tenant(), + tmp.ResourceOwner(), + tmp.Service(), + tmp.Category(), + isItem) + if err != nil { + panic(err) + } + + res.RR = rr + res.Loc = locFromRepo(rr, isItem) + + return res +} + const ( + folderSuffix = ".d" + fileSuffix = ".f" + ItemName1 = "item1" ItemName2 = "item2" ItemName3 = "item3" @@ -47,20 +137,21 @@ var ( Time3 = time.Date(2023, 9, 21, 10, 0, 0, 0, time.UTC) Time4 = time.Date(2023, 10, 21, 10, 0, 0, 0, time.UTC) - ExchangeEmailInboxPath = mustParsePath("tenant-id/exchange/user-id/email/Inbox", false) - ExchangeEmailBasePath = mustAppendPath(ExchangeEmailInboxPath, "subfolder", false) - ExchangeEmailBasePath2 = mustAppendPath(ExchangeEmailInboxPath, "othersubfolder/", false) - ExchangeEmailBasePath3 = mustAppendPath(ExchangeEmailBasePath2, "subsubfolder", false) - ExchangeEmailItemPath1 = mustAppendPath(ExchangeEmailBasePath, ItemName1, true) - ExchangeEmailItemPath2 = mustAppendPath(ExchangeEmailBasePath2, ItemName2, true) - ExchangeEmailItemPath3 = mustAppendPath(ExchangeEmailBasePath3, ItemName3, true) + ExchangeEmailInboxPath = mustPathRep("tenant-id/exchange/user-id/email/Inbox", false) + ExchangeEmailBasePath = ExchangeEmailInboxPath.MustAppend("subfolder", false) + ExchangeEmailBasePath2 = ExchangeEmailInboxPath.MustAppend("othersubfolder/", false) + ExchangeEmailBasePath3 = ExchangeEmailBasePath2.MustAppend("subsubfolder", false) + ExchangeEmailItemPath1 = ExchangeEmailBasePath.MustAppend(ItemName1, true) + ExchangeEmailItemPath2 = ExchangeEmailBasePath2.MustAppend(ItemName2, true) + ExchangeEmailItemPath3 = ExchangeEmailBasePath3.MustAppend(ItemName3, true) - ExchangeEmailItems = []details.DetailsEntry{ + ExchangeEmailItems = []details.Entry{ { - RepoRef: ExchangeEmailItemPath1.String(), - ShortRef: ExchangeEmailItemPath1.ShortRef(), - ParentRef: ExchangeEmailItemPath1.ToBuilder().Dir().ShortRef(), - ItemRef: ExchangeEmailItemPath1.Item(), + RepoRef: ExchangeEmailItemPath1.RR.String(), + ShortRef: ExchangeEmailItemPath1.RR.ShortRef(), + ParentRef: ExchangeEmailItemPath1.RR.ToBuilder().Dir().ShortRef(), + ItemRef: ExchangeEmailItemPath1.ItemLocation(), + LocationRef: ExchangeEmailItemPath1.Loc.String(), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeMail, @@ -71,10 +162,11 @@ var ( }, }, { - RepoRef: ExchangeEmailItemPath2.String(), - ShortRef: ExchangeEmailItemPath2.ShortRef(), - ParentRef: ExchangeEmailItemPath2.ToBuilder().Dir().ShortRef(), - ItemRef: ExchangeEmailItemPath2.Item(), + RepoRef: ExchangeEmailItemPath2.RR.String(), + ShortRef: ExchangeEmailItemPath2.RR.ShortRef(), + ParentRef: ExchangeEmailItemPath2.RR.ToBuilder().Dir().ShortRef(), + ItemRef: ExchangeEmailItemPath2.ItemLocation(), + LocationRef: ExchangeEmailItemPath2.Loc.String(), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeMail, @@ -85,10 +177,11 @@ var ( }, }, { - RepoRef: ExchangeEmailItemPath3.String(), - ShortRef: ExchangeEmailItemPath3.ShortRef(), - ParentRef: ExchangeEmailItemPath3.ToBuilder().Dir().ShortRef(), - ItemRef: ExchangeEmailItemPath3.Item(), + RepoRef: ExchangeEmailItemPath3.RR.String(), + ShortRef: ExchangeEmailItemPath3.RR.ShortRef(), + ParentRef: ExchangeEmailItemPath3.RR.ToBuilder().Dir().ShortRef(), + ItemRef: ExchangeEmailItemPath3.ItemLocation(), + LocationRef: 
ExchangeEmailItemPath3.Loc.String(), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeMail, @@ -100,18 +193,19 @@ var ( }, } - ExchangeContactsRootPath = mustParsePath("tenant-id/exchange/user-id/contacts/contacts", false) - ExchangeContactsBasePath = mustAppendPath(ExchangeContactsRootPath, "contacts", false) - ExchangeContactsBasePath2 = mustAppendPath(ExchangeContactsRootPath, "morecontacts", false) - ExchangeContactsItemPath1 = mustAppendPath(ExchangeContactsBasePath, ItemName1, true) - ExchangeContactsItemPath2 = mustAppendPath(ExchangeContactsBasePath2, ItemName2, true) + ExchangeContactsRootPath = mustPathRep("tenant-id/exchange/user-id/contacts/contacts", false) + ExchangeContactsBasePath = ExchangeContactsRootPath.MustAppend("contacts", false) + ExchangeContactsBasePath2 = ExchangeContactsRootPath.MustAppend("morecontacts", false) + ExchangeContactsItemPath1 = ExchangeContactsBasePath.MustAppend(ItemName1, true) + ExchangeContactsItemPath2 = ExchangeContactsBasePath2.MustAppend(ItemName2, true) - ExchangeContactsItems = []details.DetailsEntry{ + ExchangeContactsItems = []details.Entry{ { - RepoRef: ExchangeContactsItemPath1.String(), - ShortRef: ExchangeContactsItemPath1.ShortRef(), - ParentRef: ExchangeContactsItemPath1.ToBuilder().Dir().ShortRef(), - ItemRef: ExchangeEmailItemPath1.Item(), + RepoRef: ExchangeContactsItemPath1.RR.String(), + ShortRef: ExchangeContactsItemPath1.RR.ShortRef(), + ParentRef: ExchangeContactsItemPath1.RR.ToBuilder().Dir().ShortRef(), + ItemRef: ExchangeContactsItemPath1.ItemLocation(), + LocationRef: ExchangeContactsItemPath1.Loc.String(), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeContact, @@ -120,10 +214,11 @@ var ( }, }, { - RepoRef: ExchangeContactsItemPath2.String(), - ShortRef: ExchangeContactsItemPath2.ShortRef(), - ParentRef: ExchangeContactsItemPath2.ToBuilder().Dir().ShortRef(), - ItemRef: ExchangeEmailItemPath2.Item(), + RepoRef: ExchangeContactsItemPath2.RR.String(), + ShortRef: ExchangeContactsItemPath2.RR.ShortRef(), + ParentRef: ExchangeContactsItemPath2.RR.ToBuilder().Dir().ShortRef(), + ItemRef: ExchangeContactsItemPath2.ItemLocation(), + LocationRef: ExchangeContactsItemPath2.Loc.String(), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeContact, @@ -133,18 +228,18 @@ var ( }, } - ExchangeEventsRootPath = mustParsePath("tenant-id/exchange/user-id/events/holidays", false) - ExchangeEventsBasePath = mustAppendPath(ExchangeEventsRootPath, "holidays", false) - ExchangeEventsBasePath2 = mustAppendPath(ExchangeEventsRootPath, "moreholidays", false) - ExchangeEventsItemPath1 = mustAppendPath(ExchangeEventsBasePath, ItemName1, true) - ExchangeEventsItemPath2 = mustAppendPath(ExchangeEventsBasePath2, ItemName2, true) + ExchangeEventsBasePath = mustPathRep("tenant-id/exchange/user-id/events/holidays", false) + ExchangeEventsBasePath2 = mustPathRep("tenant-id/exchange/user-id/events/moreholidays", false) + ExchangeEventsItemPath1 = ExchangeEventsBasePath.MustAppend(ItemName1, true) + ExchangeEventsItemPath2 = ExchangeEventsBasePath2.MustAppend(ItemName2, true) - ExchangeEventsItems = []details.DetailsEntry{ + ExchangeEventsItems = []details.Entry{ { - RepoRef: ExchangeEventsItemPath1.String(), - ShortRef: ExchangeEventsItemPath1.ShortRef(), - ParentRef: ExchangeEventsItemPath1.ToBuilder().Dir().ShortRef(), - ItemRef: ExchangeEmailItemPath2.Item(), + RepoRef: ExchangeEventsItemPath1.RR.String(), + ShortRef: 
ExchangeEventsItemPath1.RR.ShortRef(), + ParentRef: ExchangeEventsItemPath1.RR.ToBuilder().Dir().ShortRef(), + ItemRef: ExchangeEventsItemPath1.ItemLocation(), + LocationRef: ExchangeEventsItemPath1.Loc.String(), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeEvent, @@ -156,10 +251,11 @@ var ( }, }, { - RepoRef: ExchangeEventsItemPath2.String(), - ShortRef: ExchangeEventsItemPath2.ShortRef(), - ParentRef: ExchangeEventsItemPath2.ToBuilder().Dir().ShortRef(), - ItemRef: ExchangeEmailItemPath2.Item(), + RepoRef: ExchangeEventsItemPath2.RR.String(), + ShortRef: ExchangeEventsItemPath2.RR.ShortRef(), + ParentRef: ExchangeEventsItemPath2.RR.ToBuilder().Dir().ShortRef(), + ItemRef: ExchangeEventsItemPath2.ItemLocation(), + LocationRef: ExchangeEventsItemPath2.Loc.String(), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeEvent, @@ -172,30 +268,31 @@ var ( }, } - OneDriveRootPath = mustParsePath("tenant-id/onedrive/user-id/files/drives/foo/root:", false) - OneDriveFolderPath = mustAppendPath(OneDriveRootPath, "folder", false) - OneDriveBasePath1 = mustAppendPath(OneDriveFolderPath, "a", false) - OneDriveBasePath2 = mustAppendPath(OneDriveFolderPath, "b", false) + OneDriveRootPath = mustPathRep("tenant-id/onedrive/user-id/files/drives/foo/root:", false) + OneDriveFolderPath = OneDriveRootPath.MustAppend("folder", false) + OneDriveBasePath1 = OneDriveFolderPath.MustAppend("a", false) + OneDriveBasePath2 = OneDriveFolderPath.MustAppend("b", false) - OneDriveItemPath1 = mustAppendPath(OneDriveFolderPath, ItemName1, true) - OneDriveItemPath2 = mustAppendPath(OneDriveBasePath1, ItemName2, true) - OneDriveItemPath3 = mustAppendPath(OneDriveBasePath2, ItemName3, true) + OneDriveItemPath1 = OneDriveFolderPath.MustAppend(ItemName1, true) + OneDriveItemPath2 = OneDriveBasePath1.MustAppend(ItemName2, true) + OneDriveItemPath3 = OneDriveBasePath2.MustAppend(ItemName3, true) - OneDriveFolderFolder = stdpath.Join(OneDriveFolderPath.Folders()[3:]...) - OneDriveParentFolder1 = stdpath.Join(OneDriveBasePath1.Folders()[3:]...) - OneDriveParentFolder2 = stdpath.Join(OneDriveBasePath2.Folders()[3:]...) 
+ OneDriveFolderFolder = OneDriveFolderPath.Loc.PopFront().String() + OneDriveParentFolder1 = OneDriveBasePath1.Loc.PopFront().String() + OneDriveParentFolder2 = OneDriveBasePath2.Loc.PopFront().String() - OneDriveItems = []details.DetailsEntry{ + OneDriveItems = []details.Entry{ { - RepoRef: OneDriveItemPath1.String(), - ShortRef: OneDriveItemPath1.ShortRef(), - ParentRef: OneDriveItemPath1.ToBuilder().Dir().ShortRef(), - ItemRef: OneDriveItemPath1.Item(), + RepoRef: OneDriveItemPath1.RR.String(), + ShortRef: OneDriveItemPath1.RR.ShortRef(), + ParentRef: OneDriveItemPath1.RR.ToBuilder().Dir().ShortRef(), + ItemRef: OneDriveItemPath1.ItemLocation(), + LocationRef: OneDriveItemPath1.Loc.String(), ItemInfo: details.ItemInfo{ OneDrive: &details.OneDriveInfo{ ItemType: details.OneDriveItem, ParentPath: OneDriveFolderFolder, - ItemName: OneDriveItemPath1.Item() + "name", + ItemName: OneDriveItemPath1.ItemLocation() + "name", Size: int64(23), Owner: UserEmail1, Created: Time2, @@ -204,15 +301,16 @@ var ( }, }, { - RepoRef: OneDriveItemPath2.String(), - ShortRef: OneDriveItemPath2.ShortRef(), - ParentRef: OneDriveItemPath2.ToBuilder().Dir().ShortRef(), - ItemRef: OneDriveItemPath2.Item(), + RepoRef: OneDriveItemPath2.RR.String(), + ShortRef: OneDriveItemPath2.RR.ShortRef(), + ParentRef: OneDriveItemPath2.RR.ToBuilder().Dir().ShortRef(), + ItemRef: OneDriveItemPath2.ItemLocation(), + LocationRef: OneDriveItemPath2.Loc.String(), ItemInfo: details.ItemInfo{ OneDrive: &details.OneDriveInfo{ ItemType: details.OneDriveItem, ParentPath: OneDriveParentFolder1, - ItemName: OneDriveItemPath2.Item() + "name", + ItemName: OneDriveItemPath2.ItemLocation() + "name", Size: int64(42), Owner: UserEmail1, Created: Time1, @@ -221,15 +319,16 @@ var ( }, }, { - RepoRef: OneDriveItemPath3.String(), - ShortRef: OneDriveItemPath3.ShortRef(), - ParentRef: OneDriveItemPath3.ToBuilder().Dir().ShortRef(), - ItemRef: OneDriveItemPath3.Item(), + RepoRef: OneDriveItemPath3.RR.String(), + ShortRef: OneDriveItemPath3.RR.ShortRef(), + ParentRef: OneDriveItemPath3.RR.ToBuilder().Dir().ShortRef(), + ItemRef: OneDriveItemPath3.ItemLocation(), + LocationRef: OneDriveItemPath3.Loc.String(), ItemInfo: details.ItemInfo{ OneDrive: &details.OneDriveInfo{ ItemType: details.OneDriveItem, ParentPath: OneDriveParentFolder2, - ItemName: OneDriveItemPath3.Item() + "name", + ItemName: OneDriveItemPath3.ItemLocation() + "name", Size: int64(19), Owner: UserEmail2, Created: Time2, @@ -239,30 +338,31 @@ var ( }, } - SharePointRootPath = mustParsePath("tenant-id/sharepoint/site-id/libraries/drives/foo/root:", false) - SharePointLibraryPath = mustAppendPath(SharePointRootPath, "library", false) - SharePointBasePath1 = mustAppendPath(SharePointLibraryPath, "a", false) - SharePointBasePath2 = mustAppendPath(SharePointLibraryPath, "b", false) + SharePointRootPath = mustPathRep("tenant-id/sharepoint/site-id/libraries/drives/foo/root:", false) + SharePointLibraryPath = SharePointRootPath.MustAppend("library", false) + SharePointBasePath1 = SharePointLibraryPath.MustAppend("a", false) + SharePointBasePath2 = SharePointLibraryPath.MustAppend("b", false) - SharePointLibraryItemPath1 = mustAppendPath(SharePointLibraryPath, ItemName1, true) - SharePointLibraryItemPath2 = mustAppendPath(SharePointBasePath1, ItemName2, true) - SharePointLibraryItemPath3 = mustAppendPath(SharePointBasePath2, ItemName3, true) + SharePointLibraryItemPath1 = SharePointLibraryPath.MustAppend(ItemName1, true) + SharePointLibraryItemPath2 = SharePointBasePath1.MustAppend(ItemName2, 
true) + SharePointLibraryItemPath3 = SharePointBasePath2.MustAppend(ItemName3, true) - SharePointLibraryFolder = stdpath.Join(SharePointLibraryPath.Folders()[3:]...) - SharePointParentLibrary1 = stdpath.Join(SharePointBasePath1.Folders()[3:]...) - SharePointParentLibrary2 = stdpath.Join(SharePointBasePath2.Folders()[3:]...) + SharePointLibraryFolder = SharePointLibraryPath.Loc.PopFront().String() + SharePointParentLibrary1 = SharePointBasePath1.Loc.PopFront().String() + SharePointParentLibrary2 = SharePointBasePath2.Loc.PopFront().String() - SharePointLibraryItems = []details.DetailsEntry{ + SharePointLibraryItems = []details.Entry{ { - RepoRef: SharePointLibraryItemPath1.String(), - ShortRef: SharePointLibraryItemPath1.ShortRef(), - ParentRef: SharePointLibraryItemPath1.ToBuilder().Dir().ShortRef(), - ItemRef: SharePointLibraryItemPath1.Item(), + RepoRef: SharePointLibraryItemPath1.RR.String(), + ShortRef: SharePointLibraryItemPath1.RR.ShortRef(), + ParentRef: SharePointLibraryItemPath1.RR.ToBuilder().Dir().ShortRef(), + ItemRef: SharePointLibraryItemPath1.ItemLocation(), + LocationRef: SharePointLibraryItemPath1.Loc.String(), ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ ItemType: details.SharePointLibrary, ParentPath: SharePointLibraryFolder, - ItemName: SharePointLibraryItemPath1.Item() + "name", + ItemName: SharePointLibraryItemPath1.ItemLocation() + "name", Size: int64(23), Owner: UserEmail1, Created: Time2, @@ -271,15 +371,16 @@ var ( }, }, { - RepoRef: SharePointLibraryItemPath2.String(), - ShortRef: SharePointLibraryItemPath2.ShortRef(), - ParentRef: SharePointLibraryItemPath2.ToBuilder().Dir().ShortRef(), - ItemRef: SharePointLibraryItemPath2.Item(), + RepoRef: SharePointLibraryItemPath2.RR.String(), + ShortRef: SharePointLibraryItemPath2.RR.ShortRef(), + ParentRef: SharePointLibraryItemPath2.RR.ToBuilder().Dir().ShortRef(), + ItemRef: SharePointLibraryItemPath2.ItemLocation(), + LocationRef: SharePointLibraryItemPath2.Loc.String(), ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ ItemType: details.SharePointLibrary, ParentPath: SharePointParentLibrary1, - ItemName: SharePointLibraryItemPath2.Item() + "name", + ItemName: SharePointLibraryItemPath2.ItemLocation() + "name", Size: int64(42), Owner: UserEmail1, Created: Time1, @@ -288,15 +389,16 @@ var ( }, }, { - RepoRef: SharePointLibraryItemPath3.String(), - ShortRef: SharePointLibraryItemPath3.ShortRef(), - ParentRef: SharePointLibraryItemPath3.ToBuilder().Dir().ShortRef(), - ItemRef: SharePointLibraryItemPath3.Item(), + RepoRef: SharePointLibraryItemPath3.RR.String(), + ShortRef: SharePointLibraryItemPath3.RR.ShortRef(), + ParentRef: SharePointLibraryItemPath3.RR.ToBuilder().Dir().ShortRef(), + ItemRef: SharePointLibraryItemPath3.ItemLocation(), + LocationRef: SharePointLibraryItemPath3.Loc.String(), ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ ItemType: details.SharePointLibrary, ParentPath: SharePointParentLibrary2, - ItemName: SharePointLibraryItemPath3.Item() + "name", + ItemName: SharePointLibraryItemPath3.ItemLocation() + "name", Size: int64(19), Owner: UserEmail2, Created: Time2, @@ -308,7 +410,7 @@ var ( ) func GetDetailsSet() *details.Details { - entries := []details.DetailsEntry{} + entries := []details.Entry{} for _, e := range ExchangeEmailItems { entries = append(entries, e) diff --git a/src/pkg/control/options.go b/src/pkg/control/options.go index 62a8027af..3bda48854 100644 --- a/src/pkg/control/options.go +++ b/src/pkg/control/options.go @@ -1,22 +1,31 @@ 
package control import ( - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + "github.com/alcionai/corso/src/pkg/control/repository" ) // Options holds the optional configurations for a process type Options struct { - Collision CollisionPolicy `json:"-"` - DisableMetrics bool `json:"disableMetrics"` - FailureHandling FailureBehavior `json:"failureHandling"` - ItemFetchParallelism int `json:"itemFetchParallelism"` - RestorePermissions bool `json:"restorePermissions"` - SkipReduce bool `json:"skipReduce"` - ToggleFeatures Toggles `json:"ToggleFeatures"` + Collision CollisionPolicy `json:"-"` + DisableMetrics bool `json:"disableMetrics"` + FailureHandling FailureBehavior `json:"failureHandling"` + RestorePermissions bool `json:"restorePermissions"` + SkipReduce bool `json:"skipReduce"` + ToggleFeatures Toggles `json:"toggleFeatures"` + Parallelism Parallelism `json:"parallelism"` + Repo repository.Options `json:"repo"` } type FailureBehavior string +type Parallelism struct { + // sets the collection buffer size before blocking. + CollectionBuffer int + // sets the parallelism of item population within a collection. + ItemFetch int +} + const ( // fails and exits the run immediately FailFast FailureBehavior = "fail-fast" @@ -31,6 +40,10 @@ func Defaults() Options { return Options{ FailureHandling: FailAfterRecovery, ToggleFeatures: Toggles{}, + Parallelism: Parallelism{ + CollectionBuffer: 4, + ItemFetch: 4, + }, } } @@ -70,9 +83,9 @@ type RestoreDestination struct { ContainerName string } -func DefaultRestoreDestination(timeFormat common.TimeFormat) RestoreDestination { +func DefaultRestoreDestination(timeFormat dttm.TimeFormat) RestoreDestination { return RestoreDestination{ - ContainerName: defaultRestoreLocation + common.FormatNow(timeFormat), + ContainerName: defaultRestoreLocation + dttm.FormatNow(timeFormat), } } @@ -88,8 +101,22 @@ type Toggles struct { // DisableIncrementals prevents backups from using incremental lookups, // forcing a new, complete backup of all data regardless of prior state. DisableIncrementals bool `json:"exchangeIncrementals,omitempty"` + // DisableDelta prevents backups from using delta-based lookups, + // forcing a backup that enumerates all items. This differs from + // DisableIncrementals in that it does not make use of delta endpoints + // at all, with or without a delta token. This is necessary when the + // user's mailbox storage is full, since Microsoft then prevents the + // API from calling delta endpoints. + DisableDelta bool `json:"exchangeDelta,omitempty"` // ExchangeImmutableIDs denotes whether Corso should store items with // immutable Exchange IDs. This is only safe to set if the previous backup for // incremental backups used immutable IDs or if a full backup is being done. ExchangeImmutableIDs bool `json:"exchangeImmutableIDs,omitempty"` + + RunMigrations bool `json:"runMigrations"` + + // DisableConcurrencyLimiter removes concurrency limits when communicating with + // graph API.
This flag is only relevant for exchange backups for now + DisableConcurrencyLimiter bool `json:"disableConcurrencyLimiter,omitempty"` } diff --git a/src/pkg/control/repository/maintenancesafety_string.go b/src/pkg/control/repository/maintenancesafety_string.go new file mode 100644 index 000000000..789bd918a --- /dev/null +++ b/src/pkg/control/repository/maintenancesafety_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=MaintenanceSafety -linecomment"; DO NOT EDIT. + +package repository + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[FullMaintenanceSafety-0] + _ = x[NoMaintenanceSafety-1] +} + +const _MaintenanceSafety_name = "FullMaintenanceSafetyNoMaintenanceSafety" + +var _MaintenanceSafety_index = [...]uint8{0, 21, 40} + +func (i MaintenanceSafety) String() string { + if i < 0 || i >= MaintenanceSafety(len(_MaintenanceSafety_index)-1) { + return "MaintenanceSafety(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MaintenanceSafety_name[_MaintenanceSafety_index[i]:_MaintenanceSafety_index[i+1]] +} diff --git a/src/pkg/control/repository/maintenancetype_string.go b/src/pkg/control/repository/maintenancetype_string.go new file mode 100644 index 000000000..fea525c93 --- /dev/null +++ b/src/pkg/control/repository/maintenancetype_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=MaintenanceType -linecomment"; DO NOT EDIT. + +package repository + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[CompleteMaintenance-0] + _ = x[MetadataMaintenance-1] +} + +const _MaintenanceType_name = "completemetadata" + +var _MaintenanceType_index = [...]uint8{0, 8, 16} + +func (i MaintenanceType) String() string { + if i < 0 || i >= MaintenanceType(len(_MaintenanceType_index)-1) { + return "MaintenanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MaintenanceType_name[_MaintenanceType_index[i]:_MaintenanceType_index[i+1]] +} diff --git a/src/pkg/control/repository/repo.go b/src/pkg/control/repository/repo.go new file mode 100644 index 000000000..aab97948a --- /dev/null +++ b/src/pkg/control/repository/repo.go @@ -0,0 +1,46 @@ +package repository + +// Repo represents options that are specific to the repo storing backed up data. +type Options struct { + User string `json:"user"` + Host string `json:"host"` +} + +type Maintenance struct { + Type MaintenanceType `json:"type"` + Safety MaintenanceSafety `json:"safety"` + Force bool `json:"force"` +} + +// --------------------------------------------------------------------------- +// Maintenance flags +// --------------------------------------------------------------------------- + +type MaintenanceType int + +// Can't be reordered as we rely on iota for numbering. +// +//go:generate stringer -type=MaintenanceType -linecomment +const ( + CompleteMaintenance MaintenanceType = iota // complete + MetadataMaintenance // metadata +) + +var StringToMaintenanceType = map[string]MaintenanceType{ + CompleteMaintenance.String(): CompleteMaintenance, + MetadataMaintenance.String(): MetadataMaintenance, +} + +type MaintenanceSafety int + +// Can't be reordered as we rely on iota for numbering. 
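A minimal sketch of how these options could be assembled by a caller (hypothetical call site; only fields and constants introduced in this diff are assumed):

    import (
        "github.com/alcionai/corso/src/pkg/control"
        "github.com/alcionai/corso/src/pkg/control/repository"
    )

    func exampleOptions() (control.Options, repository.Maintenance) {
        opts := control.Defaults() // Parallelism defaults to CollectionBuffer: 4, ItemFetch: 4
        opts.Parallelism.ItemFetch = 8
        opts.Repo = repository.Options{User: "backup-svc", Host: "worker-01"}

        // maintenance settings resolve from the generated stringer values,
        // e.g. StringToMaintenanceType["metadata"] yields MetadataMaintenance.
        maint := repository.Maintenance{
            Type:   repository.MetadataMaintenance,
            Safety: repository.FullMaintenanceSafety,
        }

        return opts, maint
    }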
+// +//go:generate stringer -type=MaintenanceSafety -linecomment +const ( + FullMaintenanceSafety MaintenanceSafety = iota + //nolint:lll + // Use only if there are no other kopia instances accessing the repo and the + // storage backend is strongly consistent. + // https://github.com/kopia/kopia/blob/f9de453efc198b6e993af8922f953a7e5322dc5f/repo/maintenance/maintenance_safety.go#L42 + NoMaintenanceSafety +) diff --git a/src/pkg/errs/err.go b/src/pkg/errs/errs.go similarity index 51% rename from src/pkg/errs/err.go rename to src/pkg/errs/errs.go index fc8158390..f93e0e51a 100644 --- a/src/pkg/errs/err.go +++ b/src/pkg/errs/errs.go @@ -13,30 +13,37 @@ import ( type errEnum string const ( - RepoAlreadyExists errEnum = "repository-already-exists" - BackupNotFound errEnum = "backup-not-found" - ServiceNotEnabled errEnum = "service-not-enabled" + RepoAlreadyExists errEnum = "repository-already-exists" + BackupNotFound errEnum = "backup-not-found" + ServiceNotEnabled errEnum = "service-not-enabled" + ResourceOwnerNotFound errEnum = "resource-owner-not-found" ) // map of enums to errors. We might want to re-use an enum for multiple // internal errors (ex: "ServiceNotEnabled" may exist in both graph and // non-graph producers). var internalToExternal = map[errEnum][]error{ - RepoAlreadyExists: {repository.ErrorRepoAlreadyExists}, - BackupNotFound: {repository.ErrorBackupNotFound}, - ServiceNotEnabled: {graph.ErrServiceNotEnabled}, + RepoAlreadyExists: {repository.ErrorRepoAlreadyExists}, + BackupNotFound: {repository.ErrorBackupNotFound}, + ServiceNotEnabled: {graph.ErrServiceNotEnabled}, + ResourceOwnerNotFound: {graph.ErrResourceOwnerNotFound}, +} + +// Internal returns the internal errors that map to the public error category. +func Internal(enum errEnum) []error { + return internalToExternal[enum] } // Is checks if the provided error contains an internal error that matches // the public error category.
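For context, a small sketch of how a consumer might branch on these exported categories rather than matching internal sentinel errors (hypothetical helper; assumes only the errs API shown here):

    import "github.com/alcionai/corso/src/pkg/errs"

    // classify maps known error categories to user-facing messages.
    func classify(err error) string {
        switch {
        case errs.Is(err, errs.BackupNotFound):
            return "no backup exists with that ID"
        case errs.Is(err, errs.ResourceOwnerNotFound):
            return "the requested user or site was not found in the tenant"
        default:
            return err.Error()
        }
    }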
func Is(err error, enum errEnum) bool { - esl, ok := internalToExternal[enum] + internalErrs, ok := internalToExternal[enum] if !ok { return false } - for _, e := range esl { - if errors.Is(err, e) { + for _, target := range internalErrs { + if errors.Is(err, target) { return true } } diff --git a/src/pkg/errs/errs_test.go b/src/pkg/errs/errs_test.go index 6c854f31b..789c88658 100644 --- a/src/pkg/errs/errs_test.go +++ b/src/pkg/errs/errs_test.go @@ -3,6 +3,7 @@ package errs import ( "testing" + "github.com/alcionai/clues" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" @@ -19,19 +20,52 @@ func TestErrUnitSuite(t *testing.T) { suite.Run(t, &ErrUnitSuite{Suite: tester.NewUnitSuite(t)}) } +func (suite *ErrUnitSuite) TestInternal() { + table := []struct { + get errEnum + expect []error + }{ + {RepoAlreadyExists, []error{repository.ErrorRepoAlreadyExists}}, + {BackupNotFound, []error{repository.ErrorBackupNotFound}}, + {ServiceNotEnabled, []error{graph.ErrServiceNotEnabled}}, + {ResourceOwnerNotFound, []error{graph.ErrResourceOwnerNotFound}}, + } + for _, test := range table { + suite.Run(string(test.get), func() { + assert.ElementsMatch(suite.T(), test.expect, Internal(test.get)) + }) + } +} + func (suite *ErrUnitSuite) TestIs() { table := []struct { - is errEnum - input error + target errEnum + err error }{ {RepoAlreadyExists, repository.ErrorRepoAlreadyExists}, {BackupNotFound, repository.ErrorBackupNotFound}, {ServiceNotEnabled, graph.ErrServiceNotEnabled}, + {ResourceOwnerNotFound, graph.ErrResourceOwnerNotFound}, } for _, test := range table { - suite.Run(string(test.is), func() { - assert.True(suite.T(), Is(test.input, test.is)) - assert.False(suite.T(), Is(assert.AnError, test.is)) + suite.Run(string(test.target), func() { + var ( + w = clues.Wrap(test.err, "wrap") + s = clues.Stack(test.err) + es = clues.Stack(assert.AnError, test.err) + se = clues.Stack(test.err, assert.AnError) + sw = clues.Stack(assert.AnError, w) + ws = clues.Stack(w, assert.AnError) + ) + + assert.True(suite.T(), Is(test.err, test.target)) + assert.True(suite.T(), Is(w, test.target)) + assert.True(suite.T(), Is(s, test.target)) + assert.True(suite.T(), Is(es, test.target)) + assert.True(suite.T(), Is(se, test.target)) + assert.True(suite.T(), Is(sw, test.target)) + assert.True(suite.T(), Is(ws, test.target)) + assert.False(suite.T(), Is(assert.AnError, test.target)) }) } } diff --git a/src/pkg/fault/example_fault_test.go b/src/pkg/fault/example_fault_test.go index 222d52270..c830a9aa9 100644 --- a/src/pkg/fault/example_fault_test.go +++ b/src/pkg/fault/example_fault_test.go @@ -428,6 +428,7 @@ func ExampleBus_AddSkip() { // error. Our only option is to skip it. errs.AddSkip(fault.FileSkip( fault.SkipMalware, + "deduplication-namespace", "file-id", "file-name", map[string]any{"foo": "bar"}, diff --git a/src/pkg/fault/fault.go b/src/pkg/fault/fault.go index c3601bd14..041e3d4e2 100644 --- a/src/pkg/fault/fault.go +++ b/src/pkg/fault/fault.go @@ -204,7 +204,7 @@ type Errors struct { // Items are the reduction of all errors (both the failure and the // recovered values) in the Errors struct into a slice of items, - // deduplicated by their ID. + // deduplicated by their Namespace + ID. Items []Item `json:"items"` // Skipped is the accumulation of skipped items. Skipped items @@ -218,7 +218,8 @@ type Errors struct { } // itemsIn reduces all errors (both the failure and recovered values) -// in the Errors struct into a slice of items, deduplicated by their ID. 
+// in the Errors struct into a slice of items, deduplicated by their +// Namespace + ID. // Any non-item error is serialized to a clues.ErrCore and returned in // the second list. func itemsIn(failure error, recovered []error) ([]Item, []*clues.ErrCore) { @@ -234,12 +235,12 @@ func itemsIn(failure error, recovered []error) ([]Item, []*clues.ErrCore) { continue } - is[ie.ID] = *ie + is[ie.dedupeID()] = *ie } var ie *Item if errors.As(failure, &ie) { - is[ie.ID] = *ie + is[ie.dedupeID()] = *ie } return maps.Values(is), non diff --git a/src/pkg/fault/fault_test.go b/src/pkg/fault/fault_test.go index 3370e3d3e..4d731ede1 100644 --- a/src/pkg/fault/fault_test.go +++ b/src/pkg/fault/fault_test.go @@ -193,7 +193,7 @@ func (suite *FaultErrorsUnitSuite) TestAddSkip() { n.AddRecoverable(assert.AnError) assert.Len(t, n.Skipped(), 0) - n.AddSkip(fault.OwnerSkip(fault.SkipMalware, "id", "name", nil)) + n.AddSkip(fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil)) assert.Len(t, n.Skipped(), 1) } @@ -262,12 +262,12 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { name: "failure item", errs: func() *fault.Errors { b := fault.New(false) - b.Fail(fault.OwnerErr(ae, "id", "name", addtl)) + b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) b.AddRecoverable(ae) return b.Errors() }, - expectItems: []fault.Item{*fault.OwnerErr(ae, "id", "name", addtl)}, + expectItems: []fault.Item{*fault.OwnerErr(ae, "ns", "id", "name", addtl)}, expectRecoverable: noncore, }, { @@ -275,25 +275,40 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { errs: func() *fault.Errors { b := fault.New(false) b.Fail(ae) - b.AddRecoverable(fault.OwnerErr(ae, "id", "name", addtl)) + b.AddRecoverable(fault.OwnerErr(ae, "ns", "id", "name", addtl)) return b.Errors() }, - expectItems: []fault.Item{*fault.OwnerErr(ae, "id", "name", addtl)}, + expectItems: []fault.Item{*fault.OwnerErr(ae, "ns", "id", "name", addtl)}, expectRecoverable: []*clues.ErrCore{}, }, { name: "two items", errs: func() *fault.Errors { b := fault.New(false) - b.Fail(fault.OwnerErr(ae, "oid", "name", addtl)) - b.AddRecoverable(fault.FileErr(ae, "fid", "name", addtl)) + b.Fail(fault.OwnerErr(ae, "ns", "oid", "name", addtl)) + b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) return b.Errors() }, expectItems: []fault.Item{ - *fault.OwnerErr(ae, "oid", "name", addtl), - *fault.FileErr(ae, "fid", "name", addtl), + *fault.OwnerErr(ae, "ns", "oid", "name", addtl), + *fault.FileErr(ae, "ns", "fid", "name", addtl), + }, + expectRecoverable: []*clues.ErrCore{}, + }, + { + name: "two items - diff namespace same id", + errs: func() *fault.Errors { + b := fault.New(false) + b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) + b.AddRecoverable(fault.FileErr(ae, "ns2", "id", "name", addtl)) + + return b.Errors() + }, + expectItems: []fault.Item{ + *fault.OwnerErr(ae, "ns", "id", "name", addtl), + *fault.FileErr(ae, "ns2", "id", "name", addtl), }, expectRecoverable: []*clues.ErrCore{}, }, @@ -301,13 +316,13 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { name: "duplicate items - failure priority", errs: func() *fault.Errors { b := fault.New(false) - b.Fail(fault.OwnerErr(ae, "id", "name", addtl)) - b.AddRecoverable(fault.FileErr(ae, "id", "name", addtl)) + b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) + b.AddRecoverable(fault.FileErr(ae, "ns", "id", "name", addtl)) return b.Errors() }, expectItems: []fault.Item{ - *fault.OwnerErr(ae, "id", "name", addtl), + *fault.OwnerErr(ae, "ns", "id", "name", addtl), }, expectRecoverable: 
[]*clues.ErrCore{}, }, @@ -316,13 +331,13 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { errs: func() *fault.Errors { b := fault.New(false) b.Fail(ae) - b.AddRecoverable(fault.FileErr(ae, "fid", "name", addtl)) - b.AddRecoverable(fault.FileErr(ae, "fid", "name2", addtl)) + b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) + b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name2", addtl)) return b.Errors() }, expectItems: []fault.Item{ - *fault.FileErr(ae, "fid", "name2", addtl), + *fault.FileErr(ae, "ns", "fid", "name2", addtl), }, expectRecoverable: []*clues.ErrCore{}, }, @@ -331,13 +346,13 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { errs: func() *fault.Errors { b := fault.New(false) b.Fail(ae) - b.AddRecoverable(fault.FileErr(ae, "fid", "name", addtl)) + b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) b.AddRecoverable(ae) return b.Errors() }, expectItems: []fault.Item{ - *fault.FileErr(ae, "fid", "name", addtl), + *fault.FileErr(ae, "ns", "fid", "name", addtl), }, expectRecoverable: noncore, }, diff --git a/src/pkg/fault/item.go b/src/pkg/fault/item.go index c0a5eac76..8b5f8c929 100644 --- a/src/pkg/fault/item.go +++ b/src/pkg/fault/item.go @@ -7,6 +7,7 @@ const ( AddtlLastModBy = "last_modified_by" AddtlContainerID = "container_id" AddtlContainerName = "container_name" + AddtlContainerPath = "container_path" AddtlMalwareDesc = "malware_description" ) @@ -48,6 +49,12 @@ var ( // by the end user (cli or sdk) for surfacing human-readable and // identifiable points of failure. type Item struct { + // deduplication namespace; the maximally-unique boundary of the + // item ID. The scope of this boundary depends on the service. + // ex: exchange items are unique within their category, drive items + // are only unique within a given drive. + Namespace string `json:"namespace"` + // deduplication identifier; the ID of the observed item. ID string `json:"id"` @@ -71,6 +78,12 @@ type Item struct { Additional map[string]any `json:"additional"` } +// dedupeID is the id used to deduplicate items when aggreagating +// errors in fault.Errors(). +func (i *Item) dedupeID() string { + return i.Namespace + i.ID +} + // Error complies with the error interface. func (i *Item) Error() string { if i == nil { @@ -110,23 +123,24 @@ func (i Item) Values() []string { } // ContainerErr produces a Container-type Item for tracking erroneous items -func ContainerErr(cause error, id, name string, addtl map[string]any) *Item { - return itemErr(ContainerType, cause, id, name, addtl) +func ContainerErr(cause error, namespace, id, name string, addtl map[string]any) *Item { + return itemErr(ContainerType, cause, namespace, id, name, addtl) } // FileErr produces a File-type Item for tracking erroneous items. -func FileErr(cause error, id, name string, addtl map[string]any) *Item { - return itemErr(FileType, cause, id, name, addtl) +func FileErr(cause error, namespace, id, name string, addtl map[string]any) *Item { + return itemErr(FileType, cause, namespace, id, name, addtl) } // OnwerErr produces a ResourceOwner-type Item for tracking erroneous items. -func OwnerErr(cause error, id, name string, addtl map[string]any) *Item { - return itemErr(ResourceOwnerType, cause, id, name, addtl) +func OwnerErr(cause error, namespace, id, name string, addtl map[string]any) *Item { + return itemErr(ResourceOwnerType, cause, namespace, id, name, addtl) } // itemErr produces a Item of the provided type for tracking erroneous items. 
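The Namespace field changes how fault.Errors() deduplicates: items that share an ID but originate from different namespaces are now kept as separate entries. A rough sketch (hypothetical IDs; uses only the constructors shown above plus the standard errors package):

    b := fault.New(false)
    cause := errors.New("malware detected")

    // the same item ID reported from two different drives
    b.AddRecoverable(fault.FileErr(cause, "drive-1", "item-id", "report.docx", nil))
    b.AddRecoverable(fault.FileErr(cause, "drive-2", "item-id", "report.docx", nil))

    // Errors().Items retains both entries, because dedupeID() (Namespace + ID)
    // differs even though the bare IDs collide.
    items := b.Errors().Items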
-func itemErr(t itemType, cause error, id, name string, addtl map[string]any) *Item { +func itemErr(t itemType, cause error, namespace, id, name string, addtl map[string]any) *Item { return &Item{ + Namespace: namespace, ID: id, Name: name, Type: t, @@ -227,24 +241,25 @@ func (s Skipped) Values() []string { } // ContainerSkip produces a Container-kind Item for tracking skipped items. -func ContainerSkip(cause skipCause, id, name string, addtl map[string]any) *Skipped { - return itemSkip(ContainerType, cause, id, name, addtl) +func ContainerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { + return itemSkip(ContainerType, cause, namespace, id, name, addtl) } // FileSkip produces a File-kind Item for tracking skipped items. -func FileSkip(cause skipCause, id, name string, addtl map[string]any) *Skipped { - return itemSkip(FileType, cause, id, name, addtl) +func FileSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { + return itemSkip(FileType, cause, namespace, id, name, addtl) } // OnwerSkip produces a ResourceOwner-kind Item for tracking skipped items. -func OwnerSkip(cause skipCause, id, name string, addtl map[string]any) *Skipped { - return itemSkip(ResourceOwnerType, cause, id, name, addtl) +func OwnerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { + return itemSkip(ResourceOwnerType, cause, namespace, id, name, addtl) } // itemSkip produces a Item of the provided type for tracking skipped items. -func itemSkip(t itemType, cause skipCause, id, name string, addtl map[string]any) *Skipped { +func itemSkip(t itemType, cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { return &Skipped{ Item: Item{ + Namespace: namespace, ID: id, Name: name, Type: t, diff --git a/src/pkg/fault/item_test.go b/src/pkg/fault/item_test.go index 18fce66f7..b597121ee 100644 --- a/src/pkg/fault/item_test.go +++ b/src/pkg/fault/item_test.go @@ -36,9 +36,10 @@ func (suite *ItemUnitSuite) TestItem_Error() { func (suite *ItemUnitSuite) TestContainerErr() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := ContainerErr(clues.New("foo"), "id", "name", addtl) + i := ContainerErr(clues.New("foo"), "ns", "id", "name", addtl) expect := Item{ + Namespace: "ns", ID: "id", Name: "name", Type: ContainerType, @@ -52,9 +53,10 @@ func (suite *ItemUnitSuite) TestContainerErr() { func (suite *ItemUnitSuite) TestFileErr() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := FileErr(clues.New("foo"), "id", "name", addtl) + i := FileErr(clues.New("foo"), "ns", "id", "name", addtl) expect := Item{ + Namespace: "ns", ID: "id", Name: "name", Type: FileType, @@ -68,9 +70,10 @@ func (suite *ItemUnitSuite) TestFileErr() { func (suite *ItemUnitSuite) TestOwnerErr() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := OwnerErr(clues.New("foo"), "id", "name", addtl) + i := OwnerErr(clues.New("foo"), "ns", "id", "name", addtl) expect := Item{ + Namespace: "ns", ID: "id", Name: "name", Type: ResourceOwnerType, @@ -127,17 +130,17 @@ func (suite *ItemUnitSuite) TestItem_HeadersValues() { }{ { name: "file", - item: FileErr(assert.AnError, "id", "name", addtl), + item: FileErr(assert.AnError, "ns", "id", "name", addtl), expect: []string{"Error", FileType.Printable(), "name", "cname", cause}, }, { name: "container", - item: ContainerErr(assert.AnError, "id", "name", addtl), + item: ContainerErr(assert.AnError, "ns", "id", "name", addtl), expect: []string{"Error", ContainerType.Printable(), "name", 
"cname", cause}, }, { name: "owner", - item: OwnerErr(assert.AnError, "id", "name", nil), + item: OwnerErr(assert.AnError, "ns", "id", "name", nil), expect: []string{"Error", ResourceOwnerType.Printable(), "name", "", cause}, }, } @@ -169,9 +172,10 @@ func (suite *ItemUnitSuite) TestSkipped_String() { func (suite *ItemUnitSuite) TestContainerSkip() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := ContainerSkip(SkipMalware, "id", "name", addtl) + i := ContainerSkip(SkipMalware, "ns", "id", "name", addtl) expect := Item{ + Namespace: "ns", ID: "id", Name: "name", Type: ContainerType, @@ -185,9 +189,10 @@ func (suite *ItemUnitSuite) TestContainerSkip() { func (suite *ItemUnitSuite) TestFileSkip() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := FileSkip(SkipMalware, "id", "name", addtl) + i := FileSkip(SkipMalware, "ns", "id", "name", addtl) expect := Item{ + Namespace: "ns", ID: "id", Name: "name", Type: FileType, @@ -201,9 +206,10 @@ func (suite *ItemUnitSuite) TestFileSkip() { func (suite *ItemUnitSuite) TestOwnerSkip() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := OwnerSkip(SkipMalware, "id", "name", addtl) + i := OwnerSkip(SkipMalware, "ns", "id", "name", addtl) expect := Item{ + Namespace: "ns", ID: "id", Name: "name", Type: ResourceOwnerType, @@ -227,17 +233,17 @@ func (suite *ItemUnitSuite) TestSkipped_HeadersValues() { }{ { name: "file", - skip: FileSkip(SkipMalware, "id", "name", addtl), + skip: FileSkip(SkipMalware, "ns", "id", "name", addtl), expect: []string{"Skip", FileType.Printable(), "name", "cname", string(SkipMalware)}, }, { name: "container", - skip: ContainerSkip(SkipMalware, "id", "name", addtl), + skip: ContainerSkip(SkipMalware, "ns", "id", "name", addtl), expect: []string{"Skip", ContainerType.Printable(), "name", "cname", string(SkipMalware)}, }, { name: "owner", - skip: OwnerSkip(SkipMalware, "id", "name", nil), + skip: OwnerSkip(SkipMalware, "ns", "id", "name", nil), expect: []string{"Skip", ResourceOwnerType.Printable(), "name", "", string(SkipMalware)}, }, } diff --git a/src/pkg/fault/testdata/testdata.go b/src/pkg/fault/testdata/testdata.go index 8b3cf7bb8..a3a0e48dc 100644 --- a/src/pkg/fault/testdata/testdata.go +++ b/src/pkg/fault/testdata/testdata.go @@ -19,7 +19,7 @@ func MakeErrors(failure, recovered, skipped bool) fault.Errors { } if skipped { - fe.Skipped = []fault.Skipped{*fault.FileSkip(fault.SkipMalware, "id", "name", nil)} + fe.Skipped = []fault.Skipped{*fault.FileSkip(fault.SkipMalware, "ns", "id", "name", nil)} } return fe diff --git a/src/pkg/filters/comparator_string.go b/src/pkg/filters/comparator_string.go deleted file mode 100644 index e6afe098c..000000000 --- a/src/pkg/filters/comparator_string.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by "stringer -type=comparator -linecomment"; DO NOT EDIT. - -package filters - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[UnknownComparator-0] - _ = x[EqualTo-1] - _ = x[GreaterThan-2] - _ = x[LessThan-3] - _ = x[TargetContains-4] - _ = x[TargetIn-5] - _ = x[Passes-6] - _ = x[Fails-7] - _ = x[IdentityValue-8] - _ = x[TargetPrefixes-9] - _ = x[TargetSuffixes-10] - _ = x[TargetPathPrefix-11] - _ = x[TargetPathContains-12] - _ = x[TargetPathSuffix-13] - _ = x[TargetPathEquals-14] -} - -const _comparator_name = "UnknownComparisonEQGTLTContINPassFailIdentityPfxSfxPathPfxPathContPathSfxPathEQ" - -var _comparator_index = [...]uint8{0, 17, 19, 21, 23, 27, 29, 33, 37, 45, 48, 51, 58, 66, 73, 79} - -func (i comparator) String() string { - if i < 0 || i >= comparator(len(_comparator_index)-1) { - return "comparator(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _comparator_name[_comparator_index[i]:_comparator_index[i+1]] -} diff --git a/src/pkg/filters/filters.go b/src/pkg/filters/filters.go index 67f2aaa34..638d578db 100644 --- a/src/pkg/filters/filters.go +++ b/src/pkg/filters/filters.go @@ -11,41 +11,47 @@ import ( "github.com/alcionai/corso/src/pkg/path" ) -type comparator int +type comparator string //go:generate stringer -type=comparator -linecomment const ( - UnknownComparator comparator = iota // UnknownComparison - // a == b - EqualTo // EQ + UnknownComparator comparator = "" + // norm(a) == norm(b) + EqualTo = "EQ" + // a === b + StrictEqualTo = "StrictEQ" // a > b - GreaterThan // GT + GreaterThan = "GT" // a < b - LessThan // LT + LessThan = "LT" // "foo,bar,baz" contains "foo" - TargetContains // Cont + TargetContains = "Cont" // "foo" is found in "foo,bar,baz" - TargetIn // IN + TargetIn = "IN" // always passes - Passes // Pass + Passes = "Pass" // always fails - Fails // Fail + Fails = "Fail" // passthrough for the target - IdentityValue // Identity + IdentityValue = "Identity" // "foo" is a prefix of "foobarbaz" - TargetPrefixes // Pfx + TargetPrefixes = "Pfx" // "baz" is a suffix of "foobarbaz" - TargetSuffixes // Sfx + TargetSuffixes = "Sfx" // "foo" equals any complete element prefix of "foo/bar/baz" - TargetPathPrefix // PathPfx + TargetPathPrefix = "PathPfx" // "foo" equals any complete element in "foo/bar/baz" - TargetPathContains // PathCont + TargetPathContains = "PathCont" // "baz" equals any complete element suffix of "foo/bar/baz" - TargetPathSuffix // PathSfx + TargetPathSuffix = "PathSfx" // "foo/bar/baz" equals the complete path "foo/bar/baz" - TargetPathEquals // PathEQ + TargetPathEquals = "PathEQ" ) +func (c comparator) String() string { + return string(c) +} + func normAll(ss []string) []string { r := slices.Clone(ss) for i := range r { @@ -56,7 +62,7 @@ func normAll(ss []string) []string { } func norm(s string) string { - return strings.ToLower(s) + return strings.ToLower(strings.TrimSpace(s)) } // normPathElem ensures the string is: @@ -66,6 +72,8 @@ func norm(s string) string { // without re-running the prefix-suffix addition multiple // times per target. func normPathElem(s string) string { + s = strings.TrimSpace(s) + if len(s) == 0 { return s } @@ -74,7 +82,9 @@ func normPathElem(s string) string { s = string(path.PathSeparator) + s } - s = path.TrimTrailingSlash(s) + string(path.PathSeparator) + s = path.TrimTrailingSlash(s) + s = strings.ToLower(s) + s += string(path.PathSeparator) return s } @@ -83,7 +93,7 @@ func normPathElem(s string) string { // compare values against. Filter.Matches(v) returns // true if Filter.Comparer(filter.target, v) is true. 
type Filter struct { - Comparator comparator `json:"comparator"` + Comparator comparator `json:"comparator_type"` // the type of comparison Targets []string `json:"targets"` // the set of values to compare NormalizedTargets []string `json:"normalizedTargets"` // the set of comparable values post normalization Negate bool `json:"negate"` // when true, negate the comparator result @@ -92,7 +102,8 @@ type Filter struct { Identity string `json:"identity"` // deprecated, kept around for deserialization - Target string `json:"target"` // the value to compare against + Target string `json:"target"` // the value to compare against + ComparatorInt int `json:"comparator"` } // ---------------------------------------------------------------------------------------------------- @@ -111,7 +122,7 @@ func Identity(id string) Filter { } // Equal creates a filter where Compare(v) is true if, for any target string, -// target == v +// norm(target) == norm(v) func Equal(target []string) Filter { return newFilter(EqualTo, target, normAll(target), false) } @@ -122,6 +133,19 @@ func NotEqual(target []string) Filter { return newFilter(EqualTo, target, normAll(target), true) } +// StrictEqual creates a filter where Compare(v) is true if, for any target string, +// target === v. Target and v are not normalized for this comparison. The comparison +// is case sensitive and ignores character folding. +func StrictEqual(target []string) Filter { + return newFilter(StrictEqualTo, target, normAll(target), false) +} + +// NotStrictEqual creates a filter where Compare(v) is true if, for any target string, +// target != v +func NotStrictEqual(target []string) Filter { + return newFilter(StrictEqualTo, target, normAll(target), true) +} + // Greater creates a filter where Compare(v) is true if, for any target string, // target > v func Greater(target []string) Filter { @@ -356,29 +380,24 @@ func (f Filter) CompareAny(inputs ...string) bool { func (f Filter) Compare(input string) bool { var cmp func(string, string) bool + // select comparison func switch f.Comparator { - case EqualTo, IdentityValue: + case EqualTo, IdentityValue, TargetPathEquals: cmp = equals + case StrictEqualTo: + cmp = strictEquals case GreaterThan: cmp = greater case LessThan: cmp = less - case TargetContains: + case TargetContains, TargetPathContains: cmp = contains case TargetIn: cmp = in - case TargetPrefixes: + case TargetPrefixes, TargetPathPrefix: cmp = prefixed - case TargetSuffixes: + case TargetSuffixes, TargetPathSuffix: cmp = suffixed - case TargetPathPrefix: - cmp = pathPrefix - case TargetPathContains: - cmp = pathContains - case TargetPathSuffix: - cmp = pathSuffix - case TargetPathEquals: - cmp = pathEquals case Passes: return true case Fails: @@ -388,14 +407,39 @@ func (f Filter) Compare(input string) bool { var ( res bool targets = f.NormalizedTargets + _input = norm(input) + // most comparators expect cmp(target, input) + // path comparators expect cmp(input, target) + swapParams bool ) + // set conditional behavior + switch f.Comparator { + case TargetContains: + // legacy case handling for contains, which checks for + // strings.Contains(target, input) instead of (input, target) + swapParams = true + case StrictEqualTo: + targets = f.Targets + _input = input + case TargetPathPrefix, TargetPathContains, TargetPathSuffix, TargetPathEquals: + // As a precondition, assumes each entry in the NormalizedTargets + // list has been passed through normPathElem(). 
+ _input = normPathElem(input) + } + if len(targets) == 0 { targets = f.Targets } for _, tgt := range targets { - res = cmp(norm(tgt), norm(input)) + t, i := tgt, _input + + if swapParams { + t, i = _input, tgt + } + + res = cmp(t, i) // any-match if res { @@ -410,11 +454,16 @@ func (f Filter) Compare(input string) bool { return res } -// true if t == i +// true if t == i, case insensitive and folded func equals(target, input string) bool { return strings.EqualFold(target, input) } +// true if t == i, case sensitive and not folded +func strictEquals(target, input string) bool { + return target == input +} + // true if t > i func greater(target, input string) bool { return target > input @@ -425,9 +474,9 @@ func less(target, input string) bool { return target < input } -// true if target contains input as a substring. +// true if input contains target as a substring. func contains(target, input string) bool { - return strings.Contains(target, input) + return strings.Contains(input, target) } // true if input contains target as a substring. @@ -445,63 +494,6 @@ func suffixed(target, input string) bool { return strings.HasSuffix(input, target) } -// true if target is an _element complete_ prefix match -// on the input. Element complete means we do not -// succeed on partial element matches (ex: "/foo" does -// not match "/foobar"). -// -// As a precondition, assumes the target value has been -// passed through normPathElem(). -// -// The input is assumed to be the complete path that may -// have the target as a prefix. -func pathPrefix(target, input string) bool { - return strings.HasPrefix(normPathElem(input), target) -} - -// true if target has an _element complete_ equality -// with any element, or any sequence of elements, from -// the input. Element complete means we do not succeed -// on partial element matches (ex: foo does not match -// /foobar, and foo/bar does not match foo/barbaz). -// -// As a precondition, assumes the target value has been -// passed through normPathElem(). -// -// Input is assumed to be the complete path that may -// contain the target as an element or sequence of elems. -func pathContains(target, input string) bool { - return strings.Contains(normPathElem(input), target) -} - -// true if target is an _element complete_ suffix match -// on the input. Element complete means we do not -// succeed on partial element matches (ex: "/bar" does -// not match "/foobar"). -// -// As a precondition, assumes the target value has been -// passed through normPathElem(). -// -// The input is assumed to be the complete path that may -// have the target as a suffix. -func pathSuffix(target, input string) bool { - return strings.HasSuffix(normPathElem(input), target) -} - -// true if target is an _exact_ match on the input, excluding -// path delmiters. Element complete means we do not succeed -// on partial element matches (ex: "/bar" does not match -// "/foobar"). -// -// As a precondition, assumes the target value has been -// passed through normPathElem(). -// -// The input is assumed to be the complete path that may -// match the target. 
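With the dedicated path comparators removed, the path filters now lean on normPathElem, which trims, lowercases, and slash-wraps both targets and inputs, so the generic prefixed/suffixed/contains helpers become element-complete. A quick sketch of the observable behavior (values follow the same rules the tests below exercise):

    f := filters.PathContains([]string{"fA"})

    f.Compare("/z/fA/b")  // true:  normalized target "/fa/" is a complete element of "/z/fa/b/"
    f.Compare("/z/fAb/c") // false: partial element matches are rejected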
-func pathEquals(target, input string) bool { - return strings.EqualFold(normPathElem(input), target) -} - // ---------------------------------------------------------------------------------------------------- // Printers and PII control // ---------------------------------------------------------------------------------------------------- @@ -511,9 +503,11 @@ var _ clues.PlainConcealer = &Filter{} var safeFilterValues = map[string]struct{}{"*": {}} func (f Filter) Conceal() string { - fcs := f.Comparator.String() + fcs := string(f.Comparator) switch f.Comparator { + case UnknownComparator: + fcs = "UnknownComparison" case Passes, Fails: return fcs } @@ -532,9 +526,11 @@ func (f Filter) String() string { } func (f Filter) PlainString() string { - fcs := f.Comparator.String() + fcs := string(f.Comparator) switch f.Comparator { + case UnknownComparator: + fcs = "UnknownComparison" case Passes, Fails: return fcs } diff --git a/src/pkg/filters/filters_test.go b/src/pkg/filters/filters_test.go index c71ffffa0..ac8820551 100644 --- a/src/pkg/filters/filters_test.go +++ b/src/pkg/filters/filters_test.go @@ -51,6 +51,8 @@ func (suite *FiltersSuite) TestEquals() { expectNF assert.BoolAssertionFunc }{ {"foo", assert.True, assert.False}, + {"FOO", assert.True, assert.False}, + {" foo ", assert.True, assert.False}, {"bar", assert.False, assert.True}, } for _, test := range table { @@ -86,6 +88,30 @@ func (suite *FiltersSuite) TestEquals_any() { } } +func (suite *FiltersSuite) TestStrictEquals() { + f := filters.StrictEqual(foo) + nf := filters.NotStrictEqual(foo) + + table := []struct { + input string + expectF assert.BoolAssertionFunc + expectNF assert.BoolAssertionFunc + }{ + {"foo", assert.True, assert.False}, + {"FOO", assert.False, assert.True}, + {" foo ", assert.False, assert.True}, + {"bar", assert.False, assert.True}, + } + for _, test := range table { + suite.Run(test.input, func() { + t := suite.T() + + test.expectF(t, f.Compare(test.input), "filter") + test.expectNF(t, nf.Compare(test.input), "negated filter") + }) + } +} + func (suite *FiltersSuite) TestGreater() { f := filters.Greater(five) nf := filters.NotGreater(five) @@ -143,6 +169,7 @@ func (suite *FiltersSuite) TestContains() { }{ {"murf", assert.True, assert.False}, {"frum", assert.False, assert.True}, + {"ssmurfss", assert.False, assert.True}, } for _, test := range table { suite.Run(test.input, func() { @@ -300,77 +327,134 @@ func (suite *FiltersSuite) TestSuffixes() { } } -func (suite *FiltersSuite) TestPathPrefix() { - table := []struct { - name string - targets []string - input string - expectF assert.BoolAssertionFunc - expectNF assert.BoolAssertionFunc - }{ - {"Exact - same case", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - different case", []string{"fa"}, "/fA", assert.True, assert.False}, - {"Prefix - same case", []string{"fA"}, "/fA/fB", assert.True, assert.False}, - {"Prefix - different case", []string{"fa"}, "/fA/fB", assert.True, assert.False}, - {"Exact - multiple folders", []string{"fA/fB"}, "/fA/fB", assert.True, assert.False}, - {"Prefix - single folder partial", []string{"f"}, "/fA/fB", assert.False, assert.True}, - {"Prefix - multi folder partial", []string{"fA/f"}, "/fA/fB", assert.False, assert.True}, - {"Target Longer - single folder", []string{"fA"}, "/f", assert.False, assert.True}, - {"Target Longer - multi folder", []string{"fA/fB"}, "/fA/f", assert.False, assert.True}, - {"Not prefix - single folder", []string{"fA"}, "/af", assert.False, assert.True}, - {"Not prefix - multi folder", 
[]string{"fA/fB"}, "/fA/bf", assert.False, assert.True}, - {"Exact - target variations - none", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - prefix", []string{"/fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - suffix", []string{"fA/"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - both", []string{"/fA/"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - none", []string{"fA"}, "fA", assert.True, assert.False}, - {"Exact - input variations - prefix", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - suffix", []string{"fA"}, "fA/", assert.True, assert.False}, - {"Exact - input variations - both", []string{"fA"}, "/fA/", assert.True, assert.False}, - {"Prefix - target variations - none", []string{"fA"}, "/fA/fb", assert.True, assert.False}, - {"Prefix - target variations - prefix", []string{"/fA"}, "/fA/fb", assert.True, assert.False}, - {"Prefix - target variations - suffix", []string{"fA/"}, "/fA/fb", assert.True, assert.False}, - {"Prefix - target variations - both", []string{"/fA/"}, "/fA/fb", assert.True, assert.False}, - {"Prefix - input variations - none", []string{"fA"}, "fA/fb", assert.True, assert.False}, - {"Prefix - input variations - prefix", []string{"fA"}, "/fA/fb", assert.True, assert.False}, - {"Prefix - input variations - suffix", []string{"fA"}, "fA/fb/", assert.True, assert.False}, - {"Prefix - input variations - both", []string{"fA"}, "/fA/fb/", assert.True, assert.False}, - {"Slice - one matches", []string{"foo", "fa/f", "fA"}, "/fA/fb", assert.True, assert.False}, - {"Slice - none match", []string{"foo", "fa/f", "f"}, "/fA/fb", assert.False, assert.True}, +// --------------------------------------------------------------------------- +// path comparators +// --------------------------------------------------------------------------- + +var pathElemNormalizationTable = []struct { + name string + targets []string + expect []string +}{ + {"Single - no slash", []string{"fA"}, []string{"/fa/"}}, + {"Single - pre slash", []string{"/fA"}, []string{"/fa/"}}, + {"Single - suff slash", []string{"fA/"}, []string{"/fa/"}}, + {"Single - both slashes", []string{"/fA/"}, []string{"/fa/"}}, + {"Multipath - no slash", []string{"fA/fB"}, []string{"/fa/fb/"}}, + {"Multipath - pre slash", []string{"/fA/fB"}, []string{"/fa/fb/"}}, + {"Multipath - suff slash", []string{"fA/fB/"}, []string{"/fa/fb/"}}, + {"Multipath - both slashes", []string{"/fA/fB/"}, []string{"/fa/fb/"}}, + {"Multi input - no slash", []string{"fA", "fB"}, []string{"/fa/", "/fb/"}}, + {"Multi input - pre slash", []string{"/fA", "/fB"}, []string{"/fa/", "/fb/"}}, + {"Multi input - suff slash", []string{"fA/", "fB/"}, []string{"/fa/", "/fb/"}}, + {"Multi input - both slashes", []string{"/fA/", "/fB/"}, []string{"/fa/", "/fb/"}}, +} + +type baf struct { + fn assert.BoolAssertionFunc + yes bool +} + +var ( + yes = baf{ + fn: assert.True, + yes: true, } - for _, test := range table { + no = baf{ + fn: assert.False, + yes: false, + } +) + +var pathComparisonsTable = []struct { + name string + targets []string + input string + expectContains baf + expectEquals baf + expectPrefix baf + expectSuffix baf +}{ + {"single folder partial", []string{"f"}, "/fA", no, no, no, no}, + {"single folder target partial", []string{"f"}, "/fA/fB", no, no, no, no}, + {"multi folder input partial", []string{"A/f"}, "/fA/fB", no, no, no, no}, + {"longer target - single folder", []string{"fA"}, "/f", no, no, no, no}, + 
{"longer target - multi folder", []string{"fA/fB"}, "/fA/f", no, no, no, no}, + {"non-matching - single folder", []string{"fA"}, "/af", no, no, no, no}, + {"non-matching - multi folder", []string{"fA/fB"}, "/fA/bf", no, no, no, no}, + + {"Exact - same case", []string{"fA"}, "/fA", yes, yes, yes, yes}, + {"Exact - different case", []string{"fa"}, "/fA", yes, yes, yes, yes}, + {"Exact - multiple folders", []string{"fA/fB"}, "/fA/fB", yes, yes, yes, yes}, + {"Exact - target slash variations - prefix", []string{"/fA"}, "/fA", yes, yes, yes, yes}, + {"Exact - target slash variations - suffix", []string{"fA/"}, "/fA", yes, yes, yes, yes}, + {"Exact - target slash variations - both", []string{"/fA/"}, "/fA", yes, yes, yes, yes}, + {"Exact - input slash variations - none", []string{"fA"}, "fA", yes, yes, yes, yes}, + {"Exact - input slash variations - prefix", []string{"fA"}, "/fA", yes, yes, yes, yes}, + {"Exact - input slash variations - suffix", []string{"fA"}, "fA/", yes, yes, yes, yes}, + {"Exact - input slash variations - both", []string{"fA"}, "/fA/", yes, yes, yes, yes}, + + {"Prefix - same case", []string{"fA"}, "/fA/fB", yes, no, yes, no}, + {"Prefix - different case", []string{"fa"}, "/fA/fB", yes, no, yes, no}, + {"Prefix - multiple folders", []string{"fa/fb"}, "/fA/fB/fC", yes, no, yes, no}, + {"Prefix - target slash variations - none", []string{"fA"}, "/fA/fb", yes, no, yes, no}, + {"Prefix - target slash variations - prefix", []string{"/fA"}, "/fA/fb", yes, no, yes, no}, + {"Prefix - target slash variations - suffix", []string{"fA/"}, "/fA/fb", yes, no, yes, no}, + {"Prefix - target slash variations - both", []string{"/fA/"}, "/fA/fb", yes, no, yes, no}, + {"Prefix - input slash variations - none", []string{"fA"}, "fA/fb", yes, no, yes, no}, + {"Prefix - input slash variations - prefix", []string{"fA"}, "/fA/fb", yes, no, yes, no}, + {"Prefix - input slash variations - suffix", []string{"fA"}, "fA/fb/", yes, no, yes, no}, + {"Prefix - input slash variations - both", []string{"fA"}, "/fA/fb/", yes, no, yes, no}, + + {"Suffix - same case", []string{"fB"}, "/fA/fB", yes, no, no, yes}, + {"Suffix - different case", []string{"fb"}, "/fA/fB", yes, no, no, yes}, + {"Suffix - multiple folders", []string{"fb/fc"}, "/fA/fB/fC", yes, no, no, yes}, + {"Suffix - target slash variations - none", []string{"fB"}, "/fA/fb", yes, no, no, yes}, + {"Suffix - target slash variations - prefix", []string{"/fB"}, "/fA/fb", yes, no, no, yes}, + {"Suffix - target slash variations - suffix", []string{"fB/"}, "/fA/fb", yes, no, no, yes}, + {"Suffix - target slash variations - both", []string{"/fB/"}, "/fA/fb", yes, no, no, yes}, + {"Suffix - input slash variations - none", []string{"fB"}, "fA/fb", yes, no, no, yes}, + {"Suffix - input slash variations - prefix", []string{"fB"}, "/fA/fb", yes, no, no, yes}, + {"Suffix - input slash variations - suffix", []string{"fB"}, "fA/fb/", yes, no, no, yes}, + {"Suffix - input slash variations - both", []string{"fB"}, "/fA/fb/", yes, no, no, yes}, + + {"Contains - same case", []string{"fB"}, "/fA/fB/fC", yes, no, no, no}, + {"Contains - different case", []string{"fb"}, "/fA/fB/fC", yes, no, no, no}, + {"Contains - multiple folders", []string{"fb/fc"}, "/fA/fB/fC/fD", yes, no, no, no}, + {"Contains - target slash variations - none", []string{"fB"}, "/fA/fb/fc", yes, no, no, no}, + {"Contains - target slash variations - prefix", []string{"/fB"}, "/fA/fb/fc", yes, no, no, no}, + {"Contains - target slash variations - suffix", []string{"fB/"}, "/fA/fb/fc", yes, no, no, no}, + 
{"Contains - target slash variations - both", []string{"/fB/"}, "/fA/fb/fc", yes, no, no, no}, + {"Contains - input slash variations - none", []string{"fB"}, "fA/fb/fc", yes, no, no, no}, + {"Contains - input slash variations - prefix", []string{"fB"}, "/fA/fb/fc/", yes, no, no, no}, + {"Contains - input slash variations - suffix", []string{"fB"}, "fA/fb/fc/", yes, no, no, no}, + {"Contains - input slash variations - both", []string{"fB"}, "/fA/fb/fc/", yes, no, no, no}, + + {"Slice - one exact matches", []string{"foo", "fa/f", "fA"}, "/fA", yes, yes, yes, yes}, + {"Slice - none match", []string{"foo", "fa/f", "f"}, "/fA", no, no, no, no}, +} + +func (suite *FiltersSuite) TestPathPrefix() { + for _, test := range pathComparisonsTable { suite.Run(test.name, func() { - t := suite.T() + var ( + t = suite.T() + f = filters.PathPrefix(test.targets) + nf = filters.NotPathPrefix(test.targets) + ) - f := filters.PathPrefix(test.targets) - nf := filters.NotPathPrefix(test.targets) - - test.expectF(t, f.Compare(test.input), "filter") - test.expectNF(t, nf.Compare(test.input), "negated filter") + test.expectPrefix.fn(t, f.Compare(test.input), "filter") + if test.expectPrefix.yes { + no.fn(t, nf.Compare(test.input), "negated filter") + } else { + yes.fn(t, nf.Compare(test.input), "negated filter") + } }) } } func (suite *FiltersSuite) TestPathPrefix_NormalizedTargets() { - table := []struct { - name string - targets []string - expect []string - }{ - {"Single - no slash", []string{"fA"}, []string{"/fA/"}}, - {"Single - pre slash", []string{"/fA"}, []string{"/fA/"}}, - {"Single - suff slash", []string{"fA/"}, []string{"/fA/"}}, - {"Single - both slashes", []string{"/fA/"}, []string{"/fA/"}}, - {"Multipath - no slash", []string{"fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - pre slash", []string{"/fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - suff slash", []string{"fA/fB/"}, []string{"/fA/fB/"}}, - {"Multipath - both slashes", []string{"/fA/fB/"}, []string{"/fA/fB/"}}, - {"Multi input - no slash", []string{"fA", "fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input - pre slash", []string{"/fA", "/fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input - suff slash", []string{"fA/", "fB/"}, []string{"/fA/", "/fB/"}}, - {"Multi input - both slashes", []string{"/fA/", "/fB/"}, []string{"/fA/", "/fB/"}}, - } - for _, test := range table { + for _, test := range pathElemNormalizationTable { suite.Run(test.name, func() { t := suite.T() @@ -381,79 +465,26 @@ func (suite *FiltersSuite) TestPathPrefix_NormalizedTargets() { } func (suite *FiltersSuite) TestPathContains() { - table := []struct { - name string - targets []string - input string - expectF assert.BoolAssertionFunc - expectNF assert.BoolAssertionFunc - }{ - {"Exact - same case", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - different case", []string{"fa"}, "/fA", assert.True, assert.False}, - {"Cont - same case single target", []string{"fA"}, "/Z/fA/B", assert.True, assert.False}, - {"Cont - different case single target", []string{"fA"}, "/z/fa/b", assert.True, assert.False}, - {"Cont - same case multi target", []string{"Z/fA"}, "/Z/fA/B", assert.True, assert.False}, - {"Cont - different case multi target", []string{"fA/B"}, "/z/fa/b", assert.True, assert.False}, - {"Exact - multiple folders", []string{"Z/fA/B"}, "/Z/fA/B", assert.True, assert.False}, - {"Cont - single folder partial", []string{"folder"}, "/Z/fA/fB", assert.False, assert.True}, - {"Cont - multi folder partial", []string{"fA/fold"}, "/Z/fA/fB", assert.False, assert.True}, - 
{"Target Longer - single folder", []string{"fA"}, "/folder", assert.False, assert.True}, - {"Target Longer - multi folder", []string{"fA/fB"}, "/fA/fold", assert.False, assert.True}, - {"Not cont - single folder", []string{"fA"}, "/afolder", assert.False, assert.True}, - {"Not cont - single target", []string{"fA"}, "/z/afolder/bfolder", assert.False, assert.True}, - {"Not cont - multi folder", []string{"fA/fB"}, "/z/fA/bfolder", assert.False, assert.True}, - {"Exact - target variations - none", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - prefix", []string{"/fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - suffix", []string{"fA/"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - both", []string{"/fA/"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - none", []string{"fA"}, "fA", assert.True, assert.False}, - {"Exact - input variations - prefix", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - suffix", []string{"fA"}, "fA/", assert.True, assert.False}, - {"Exact - input variations - both", []string{"fA"}, "/fA/", assert.True, assert.False}, - {"Cont - target variations - none", []string{"fA"}, "/fA/fb", assert.True, assert.False}, - {"Cont - target variations - prefix", []string{"/fA"}, "/fA/fb", assert.True, assert.False}, - {"Cont - target variations - suffix", []string{"fA/"}, "/fA/fb", assert.True, assert.False}, - {"Cont - target variations - both", []string{"/fA/"}, "/fA/fb", assert.True, assert.False}, - {"Cont - input variations - none", []string{"fA"}, "fA/fb", assert.True, assert.False}, - {"Cont - input variations - prefix", []string{"fA"}, "/fA/fb", assert.True, assert.False}, - {"Cont - input variations - suffix", []string{"fA"}, "fA/fb/", assert.True, assert.False}, - {"Cont - input variations - both", []string{"fA"}, "/fA/fb/", assert.True, assert.False}, - {"Slice - one matches", []string{"foo", "fa/f", "fA"}, "/fA/fb", assert.True, assert.False}, - {"Slice - none match", []string{"foo", "fa/f", "f"}, "/fA/fb", assert.False, assert.True}, - } - for _, test := range table { + for _, test := range pathComparisonsTable { suite.Run(test.name, func() { - t := suite.T() + var ( + t = suite.T() + f = filters.PathContains(test.targets) + nf = filters.NotPathContains(test.targets) + ) - f := filters.PathContains(test.targets) - nf := filters.NotPathContains(test.targets) - - test.expectF(t, f.Compare(test.input), "filter") - test.expectNF(t, nf.Compare(test.input), "negated filter") + test.expectContains.fn(t, f.Compare(test.input), "filter") + if test.expectContains.yes { + no.fn(t, nf.Compare(test.input), "negated filter") + } else { + yes.fn(t, nf.Compare(test.input), "negated filter") + } }) } } func (suite *FiltersSuite) TestPathContains_NormalizedTargets() { - table := []struct { - name string - targets []string - expect []string - }{ - {"Single - no slash", []string{"fA"}, []string{"/fA/"}}, - {"Single - pre slash", []string{"/fA"}, []string{"/fA/"}}, - {"Single - suff slash", []string{"fA/"}, []string{"/fA/"}}, - {"Single - both slashes", []string{"/fA/"}, []string{"/fA/"}}, - {"Multipath - no slash", []string{"fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - pre slash", []string{"/fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - suff slash", []string{"fA/fB/"}, []string{"/fA/fB/"}}, - {"Multipath - both slashes", []string{"/fA/fB/"}, []string{"/fA/fB/"}}, - {"Multi input - no slash", []string{"fA", "fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input 
- pre slash", []string{"/fA", "/fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input - suff slash", []string{"fA/", "fB/"}, []string{"/fA/", "/fB/"}}, - {"Multi input - both slashes", []string{"/fA/", "/fB/"}, []string{"/fA/", "/fB/"}}, - } - for _, test := range table { + for _, test := range pathElemNormalizationTable { suite.Run(test.name, func() { t := suite.T() @@ -464,76 +495,26 @@ func (suite *FiltersSuite) TestPathContains_NormalizedTargets() { } func (suite *FiltersSuite) TestPathSuffix() { - table := []struct { - name string - targets []string - input string - expectF assert.BoolAssertionFunc - expectNF assert.BoolAssertionFunc - }{ - {"Exact - same case", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - different case", []string{"fa"}, "/fA", assert.True, assert.False}, - {"Suffix - same case", []string{"fB"}, "/fA/fB", assert.True, assert.False}, - {"Suffix - different case", []string{"fb"}, "/fA/fB", assert.True, assert.False}, - {"Exact - multiple folders", []string{"fA/fB"}, "/fA/fB", assert.True, assert.False}, - {"Suffix - single folder partial", []string{"f"}, "/fA/fB", assert.False, assert.True}, - {"Suffix - multi folder partial", []string{"A/fB"}, "/fA/fB", assert.False, assert.True}, - {"Target Longer - single folder", []string{"fA"}, "/f", assert.False, assert.True}, - {"Target Longer - multi folder", []string{"fA/fB"}, "/fA/f", assert.False, assert.True}, - {"Not suffix - single folder", []string{"fA"}, "/af", assert.False, assert.True}, - {"Not suffix - multi folder", []string{"fA/fB"}, "/Af/fB", assert.False, assert.True}, - {"Exact - target variations - none", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - prefix", []string{"/fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - suffix", []string{"fA/"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - both", []string{"/fA/"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - none", []string{"fA"}, "fA", assert.True, assert.False}, - {"Exact - input variations - prefix", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - suffix", []string{"fA"}, "fA/", assert.True, assert.False}, - {"Exact - input variations - both", []string{"fA"}, "/fA/", assert.True, assert.False}, - {"Suffix - target variations - none", []string{"fb"}, "/fA/fb", assert.True, assert.False}, - {"Suffix - target variations - prefix", []string{"/fb"}, "/fA/fb", assert.True, assert.False}, - {"Suffix - target variations - suffix", []string{"fb/"}, "/fA/fb", assert.True, assert.False}, - {"Suffix - target variations - both", []string{"/fb/"}, "/fA/fb", assert.True, assert.False}, - {"Suffix - input variations - none", []string{"fb"}, "fA/fb", assert.True, assert.False}, - {"Suffix - input variations - prefix", []string{"fb"}, "/fA/fb", assert.True, assert.False}, - {"Suffix - input variations - suffix", []string{"fb"}, "fA/fb/", assert.True, assert.False}, - {"Suffix - input variations - both", []string{"fb"}, "/fA/fb/", assert.True, assert.False}, - {"Slice - one matches", []string{"foo", "fa/f", "fb"}, "/fA/fb", assert.True, assert.False}, - {"Slice - none match", []string{"foo", "fa/f", "f"}, "/fA/fb", assert.False, assert.True}, - } - for _, test := range table { + for _, test := range pathComparisonsTable { suite.Run(test.name, func() { - t := suite.T() + var ( + t = suite.T() + f = filters.PathSuffix(test.targets) + nf = filters.NotPathSuffix(test.targets) + ) - f := filters.PathSuffix(test.targets) - nf := 
filters.NotPathSuffix(test.targets) - - test.expectF(t, f.Compare(test.input), "filter") - test.expectNF(t, nf.Compare(test.input), "negated filter") + test.expectSuffix.fn(t, f.Compare(test.input), "filter") + if test.expectSuffix.yes { + no.fn(t, nf.Compare(test.input), "negated filter") + } else { + yes.fn(t, nf.Compare(test.input), "negated filter") + } }) } } func (suite *FiltersSuite) TestPathSuffix_NormalizedTargets() { - table := []struct { - name string - targets []string - expect []string - }{ - {"Single - no slash", []string{"fA"}, []string{"/fA/"}}, - {"Single - pre slash", []string{"/fA"}, []string{"/fA/"}}, - {"Single - suff slash", []string{"fA/"}, []string{"/fA/"}}, - {"Single - both slashes", []string{"/fA/"}, []string{"/fA/"}}, - {"Multipath - no slash", []string{"fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - pre slash", []string{"/fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - suff slash", []string{"fA/fB/"}, []string{"/fA/fB/"}}, - {"Multipath - both slashes", []string{"/fA/fB/"}, []string{"/fA/fB/"}}, - {"Multi input - no slash", []string{"fA", "fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input - pre slash", []string{"/fA", "/fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input - suff slash", []string{"fA/", "fB/"}, []string{"/fA/", "/fB/"}}, - {"Multi input - both slashes", []string{"/fA/", "/fB/"}, []string{"/fA/", "/fB/"}}, - } - for _, test := range table { + for _, test := range pathElemNormalizationTable { suite.Run(test.name, func() { t := suite.T() @@ -544,67 +525,26 @@ func (suite *FiltersSuite) TestPathSuffix_NormalizedTargets() { } func (suite *FiltersSuite) TestPathEquals() { - table := []struct { - name string - targets []string - input string - expectF assert.BoolAssertionFunc - expectNF assert.BoolAssertionFunc - }{ - {"Exact - same case", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - different case", []string{"fa"}, "/fA", assert.True, assert.False}, - {"Exact - multiple folders", []string{"fA/fB"}, "/fA/fB", assert.True, assert.False}, - {"Exact - target variations - none", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - prefix", []string{"/fA"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - suffix", []string{"fA/"}, "/fA", assert.True, assert.False}, - {"Exact - target variations - both", []string{"/fA/"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - none", []string{"fA"}, "fA", assert.True, assert.False}, - {"Exact - input variations - prefix", []string{"fA"}, "/fA", assert.True, assert.False}, - {"Exact - input variations - suffix", []string{"fA"}, "fA/", assert.True, assert.False}, - {"Exact - input variations - both", []string{"fA"}, "/fA/", assert.True, assert.False}, - {"Partial match", []string{"f"}, "/fA/", assert.False, assert.True}, - {"Suffix - same case", []string{"fB"}, "/fA/fB", assert.False, assert.True}, - {"Suffix - different case", []string{"fb"}, "/fA/fB", assert.False, assert.True}, - {"Prefix - same case", []string{"fA"}, "/fA/fB", assert.False, assert.True}, - {"Prefix - different case", []string{"fa"}, "/fA/fB", assert.False, assert.True}, - {"Contains - same case", []string{"fB"}, "/fA/fB/fC", assert.False, assert.True}, - {"Contains - different case", []string{"fb"}, "/fA/fB/fC", assert.False, assert.True}, - {"Slice - one matches", []string{"foo", "/fA/fb", "fb"}, "/fA/fb", assert.True, assert.False}, - {"Slice - none match", []string{"foo", "fa/f", "f"}, "/fA/fb", assert.False, assert.True}, - } - for _, test := range table { + for _, 
test := range pathComparisonsTable { suite.Run(test.name, func() { - t := suite.T() + var ( + t = suite.T() + f = filters.PathEquals(test.targets) + nf = filters.NotPathEquals(test.targets) + ) - f := filters.PathEquals(test.targets) - nf := filters.NotPathEquals(test.targets) - - test.expectF(t, f.Compare(test.input), "filter") - test.expectNF(t, nf.Compare(test.input), "negated filter") + test.expectEquals.fn(t, f.Compare(test.input), "filter") + if test.expectEquals.yes { + no.fn(t, nf.Compare(test.input), "negated filter") + } else { + yes.fn(t, nf.Compare(test.input), "negated filter") + } }) } } func (suite *FiltersSuite) TestPathEquals_NormalizedTargets() { - table := []struct { - name string - targets []string - expect []string - }{ - {"Single - no slash", []string{"fA"}, []string{"/fA/"}}, - {"Single - pre slash", []string{"/fA"}, []string{"/fA/"}}, - {"Single - suff slash", []string{"fA/"}, []string{"/fA/"}}, - {"Single - both slashes", []string{"/fA/"}, []string{"/fA/"}}, - {"Multipath - no slash", []string{"fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - pre slash", []string{"/fA/fB"}, []string{"/fA/fB/"}}, - {"Multipath - suff slash", []string{"fA/fB/"}, []string{"/fA/fB/"}}, - {"Multipath - both slashes", []string{"/fA/fB/"}, []string{"/fA/fB/"}}, - {"Multi input - no slash", []string{"fA", "fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input - pre slash", []string{"/fA", "/fB"}, []string{"/fA/", "/fB/"}}, - {"Multi input - suff slash", []string{"fA/", "fB/"}, []string{"/fA/", "/fB/"}}, - {"Multi input - both slashes", []string{"/fA/", "/fB/"}, []string{"/fA/", "/fB/"}}, - } - for _, test := range table { + for _, test := range pathElemNormalizationTable { suite.Run(test.name, func() { t := suite.T() @@ -614,6 +554,10 @@ func (suite *FiltersSuite) TestPathEquals_NormalizedTargets() { } } +// --------------------------------------------------------------------------- +// pii handling +// --------------------------------------------------------------------------- + func (suite *FiltersSuite) TestFilter_pii() { targets := []string{"fnords", "smarf", "*"} @@ -636,8 +580,8 @@ func (suite *FiltersSuite) TestFilter_pii() { suite.Run(test.name, func() { var ( t = suite.T() - expect = test.f.Comparator.String() + ":***,***,*" - expectPlain = test.f.Comparator.String() + ":" + strings.Join(targets, ",") + expect = string(test.f.Comparator) + ":***,***,*" + expectPlain = string(test.f.Comparator) + ":" + strings.Join(targets, ",") ) result := test.f.Conceal() @@ -671,14 +615,14 @@ func (suite *FiltersSuite) TestFilter_pii() { { "identity", filters.Identity("id"), - filters.IdentityValue.String() + ":***", - filters.IdentityValue.String() + ":id", + filters.IdentityValue + ":***", + filters.IdentityValue + ":id", }, { "identity", filters.Identity("*"), - filters.IdentityValue.String() + ":*", - filters.IdentityValue.String() + ":*", + filters.IdentityValue + ":*", + filters.IdentityValue + ":*", }, } for _, test := range table2 { diff --git a/src/pkg/logger/logger.go b/src/pkg/logger/logger.go index ad02751c4..39636a99c 100644 --- a/src/pkg/logger/logger.go +++ b/src/pkg/logger/logger.go @@ -12,6 +12,9 @@ import ( "github.com/spf13/pflag" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/exp/slices" + + "github.com/alcionai/corso/src/internal/common" ) // Default location for writing logs, initialized in platform specific files @@ -22,54 +25,68 @@ var ( loggerton *zap.SugaredLogger ) -type logLevel int +type logLevel string const ( - Development logLevel = iota - Info - Warn - 
Production - Disabled + LLDebug logLevel = "debug" + LLInfo logLevel = "info" + LLWarn logLevel = "warn" + LLError logLevel = "error" + LLDisabled logLevel = "disabled" +) + +type logFormat string + +const ( + // use for cli/terminal + LFText logFormat = "text" + // use for cloud logging + LFJSON logFormat = "json" +) + +type piiAlg string + +const ( + PIIHash piiAlg = "hash" + PIIMask piiAlg = "mask" + PIIPlainText piiAlg = "plaintext" ) // flag names const ( - DebugAPIFN = "debug-api-calls" - LogFileFN = "log-file" - LogLevelFN = "log-level" - ReadableLogsFN = "readable-logs" - SensitiveInfoFN = "sensitive-info" + DebugAPIFN = "debug-api-calls" + LogFileFN = "log-file" + LogFormatFN = "log-format" + LogLevelFN = "log-level" + ReadableLogsFN = "readable-logs" + MaskSensitiveDataFN = "mask-sensitive-data" ) // flag values var ( - DebugAPIFV bool - logFileFV = "" - LogLevelFV = "info" - ReadableLogsFV bool - SensitiveInfoFV = PIIPlainText + DebugAPIFV bool + logFileFV string + LogFormatFV string + LogLevelFV string + ReadableLogsFV bool + MaskSensitiveDataFV bool - LogFile string // logFileFV after processing + LogFile string // logFileFV after processing + piiHandling string // piiHandling after MaskSensitiveDataFV processing ) const ( Stderr = "stderr" Stdout = "stdout" - - PIIHash = "hash" - PIIMask = "mask" - PIIPlainText = "plaintext" - - LLDebug = "debug" - LLInfo = "info" - LLWarn = "warn" - LLError = "error" - LLDisabled = "disabled" ) // Returns the default location for writing logs func defaultLogLocation() string { - return filepath.Join(userLogsDir, "corso", "logs", time.Now().UTC().Format("2006-01-02T15-04-05Z")+".log") + return filepath.Join( + userLogsDir, + "corso", + "logs", + time.Now().UTC().Format("2006-01-02T15-04-05Z")+".log") } // adds the persistent flag --log-level and --log-file to the provided command. @@ -83,9 +100,6 @@ func AddLoggingFlags(cmd *cobra.Command) { //nolint:errcheck fs.MarkHidden(ReadableLogsFN) - // TODO(keepers): unhide when we have sufficient/complete coverage of PII handling - //nolint:errcheck - fs.MarkHidden(SensitiveInfoFN) } // internal deduplication for adding flags @@ -93,9 +107,15 @@ func addFlags(fs *pflag.FlagSet, defaultFile string) { fs.StringVar( &LogLevelFV, LogLevelFN, - LLInfo, + string(LLInfo), fmt.Sprintf("set the log level to %s|%s|%s|%s", LLDebug, LLInfo, LLWarn, LLError)) + fs.StringVar( + &LogFormatFV, + LogFormatFN, + string(LFText), + fmt.Sprintf("set the log format to %s|%s", LFText, LFJSON)) + // The default provided here is only for help info fs.StringVar(&logFileFV, LogFileFN, defaultFile, "location for writing logs, use '-' for stdout") fs.BoolVar(&DebugAPIFV, DebugAPIFN, false, "add non-2xx request/response errors to logging") @@ -106,18 +126,11 @@ func addFlags(fs *pflag.FlagSet, defaultFile string) { false, "minimizes log output for console readability: removes the file and date, colors the level") - fs.StringVar( - &SensitiveInfoFV, - SensitiveInfoFN, - PIIPlainText, - fmt.Sprintf("set the format for sensitive info in logs to %s|%s|%s", PIIHash, PIIMask, PIIPlainText)) -} - -// Settings records the user's preferred logging settings. 
-type Settings struct { - File string // what file to log to (alt: stderr, stdout) - Level string // what level to log at - PIIHandling string // how to obscure pii + fs.BoolVar( + &MaskSensitiveDataFV, + MaskSensitiveDataFN, + false, + "anonymize personal data in log output") } // Due to races between the lazy evaluation of flags in cobra and the @@ -126,54 +139,65 @@ type Settings struct { // AddLogLevelFlag() and AddLogFileFlag() ensures the flags are // displayed as part of the help/usage output. func PreloadLoggingFlags(args []string) Settings { - dlf := defaultLogLocation() fs := pflag.NewFlagSet("seed-logger", pflag.ContinueOnError) fs.ParseErrorsWhitelist.UnknownFlags = true - addFlags(fs, dlf) + addFlags(fs, "") // prevents overriding the corso/cobra help processor fs.BoolP("help", "h", false, "") - ls := Settings{ - File: dlf, - Level: LogLevelFV, - PIIHandling: SensitiveInfoFV, + set := Settings{ + File: defaultLogLocation(), + Format: LFText, + Level: LLInfo, + PIIHandling: PIIPlainText, } // parse the os args list to find the log level flag if err := fs.Parse(args); err != nil { - return ls + return set } // retrieve the user's preferred log level - // automatically defaults to "info" + // defaults to "info" levelString, err := fs.GetString(LogLevelFN) if err != nil { - return ls + return set } - ls.Level = levelString + set.Level = logLevel(levelString) + + // retrieve the user's preferred log format + // defaults to "text" + formatString, err := fs.GetString(LogFormatFN) + if err != nil { + return set + } + + set.Format = logFormat(formatString) // retrieve the user's preferred log file location - // automatically defaults to default log location + // defaults to default log location lffv, err := fs.GetString(LogFileFN) if err != nil { - return ls + return set } - ls.File = GetLogFile(lffv) - LogFile = ls.File + set.File = GetLogFile(lffv) + LogFile = set.File // retrieve the user's preferred PII handling algorithm - // automatically defaults to default log location - pii, err := fs.GetString(SensitiveInfoFN) + // defaults to "plaintext" + maskPII, err := fs.GetBool(MaskSensitiveDataFN) if err != nil { - return ls + return set } - ls.PIIHandling = pii + if maskPII { + set.PIIHandling = PIIHash + } - return ls + return set } // GetLogFile parses the log file. Uses the provided value, if populated, @@ -186,6 +210,11 @@ func GetLogFile(logFileFlagVal string) string { r = os.Getenv("CORSO_LOG_FILE") } + // if no flag or env is specified, fall back to the default + if len(r) == 0 { + r = defaultLogLocation() + } + if r == "-" { r = Stdout } @@ -202,72 +231,104 @@ func GetLogFile(logFileFlagVal string) string { return r } -func genLogger(level logLevel, logfile string) (*zapcore.Core, *zap.SugaredLogger) { +// Settings records the user's preferred logging settings. +type Settings struct { + File string // what file to log to (alt: stderr, stdout) + Format logFormat // whether to format as text (console) or json (cloud) + Level logLevel // what level to log at + PIIHandling piiAlg // how to obscure pii +} + +// EnsureDefaults sets any non-populated settings to their default value. +// exported for testing without circular dependencies. 
+func (s Settings) EnsureDefaults() Settings { + set := s + + levels := []logLevel{LLDisabled, LLDebug, LLInfo, LLWarn, LLError} + if len(set.Level) == 0 || !slices.Contains(levels, set.Level) { + set.Level = LLInfo + } + + formats := []logFormat{LFText, LFJSON} + if len(set.Format) == 0 || !slices.Contains(formats, set.Format) { + set.Format = LFText + } + + algs := []piiAlg{PIIPlainText, PIIMask, PIIHash} + if len(set.PIIHandling) == 0 || !slices.Contains(algs, set.PIIHandling) { + set.PIIHandling = piiAlg(common.First(piiHandling, string(PIIPlainText))) + } + + if len(set.File) == 0 { + set.File = GetLogFile("") + } + + return set +} + +// --------------------------------------------------------------------------- +// constructors +// --------------------------------------------------------------------------- + +func genLogger(set Settings) (*zapcore.Core, *zap.SugaredLogger) { // when testing, ensure debug logging matches the test.v setting for _, arg := range os.Args { if arg == `--test.v=true` { - level = Development + set.Level = LLDebug } } - // set up a logger core to use as a fallback - levelFilter := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { - switch level { - case Info: - return lvl >= zapcore.InfoLevel - case Warn: - return lvl >= zapcore.WarnLevel - case Production: - return lvl >= zapcore.ErrorLevel - case Disabled: - return false - default: - return true - } - }) - out := zapcore.Lock(os.Stderr) - consoleEncoder := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) - core := zapcore.NewTee( - zapcore.NewCore(consoleEncoder, out, levelFilter), - ) - - // then try to set up a logger directly var ( lgr *zap.Logger err error opts = []zap.Option{zap.AddStacktrace(zapcore.PanicLevel)} + + // set up a logger core to use as a fallback + levelFilter = zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { + switch set.Level { + case LLInfo: + return lvl >= zapcore.InfoLevel + case LLWarn: + return lvl >= zapcore.WarnLevel + case LLError: + return lvl >= zapcore.ErrorLevel + case LLDisabled: + return false + default: + return true + } + }) + + out = zapcore.Lock(os.Stderr) + consoleEncoder = zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + core = zapcore.NewTee( + zapcore.NewCore(consoleEncoder, out, levelFilter), + ) + + cfg zap.Config ) - if level != Production { - cfg := zap.NewDevelopmentConfig() - - switch level { - case Info: - cfg.Level = zap.NewAtomicLevelAt(zapcore.InfoLevel) - case Warn: - cfg.Level = zap.NewAtomicLevelAt(zapcore.WarnLevel) - case Disabled: - cfg.Level = zap.NewAtomicLevelAt(zapcore.FatalLevel) - } + switch set.Format { + case LFJSON: + cfg = setLevel(zap.NewProductionConfig(), set.Level) + cfg.OutputPaths = []string{set.File} + default: + cfg = setLevel(zap.NewDevelopmentConfig(), set.Level) if ReadableLogsFV { opts = append(opts, zap.WithCaller(false)) cfg.EncoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("15:04:05.00") - if logfile == Stderr || logfile == Stdout { + if set.File == Stderr || set.File == Stdout { cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder } } - cfg.OutputPaths = []string{logfile} - lgr, err = cfg.Build(opts...) - } else { - cfg := zap.NewProductionConfig() - cfg.OutputPaths = []string{logfile} - lgr, err = cfg.Build(opts...) + cfg.OutputPaths = []string{set.File} } // fall back to the core config if the default creation fails + lgr, err = cfg.Build(opts...) 
if err != nil { lgr = zap.New(core) } @@ -275,7 +336,22 @@ func genLogger(level logLevel, logfile string) (*zapcore.Core, *zap.SugaredLogge return &core, lgr.Sugar() } -func singleton(level logLevel, logfile string) *zap.SugaredLogger { +func setLevel(cfg zap.Config, level logLevel) zap.Config { + switch level { + case LLInfo: + cfg.Level = zap.NewAtomicLevelAt(zapcore.InfoLevel) + case LLWarn: + cfg.Level = zap.NewAtomicLevelAt(zapcore.WarnLevel) + case LLError: + cfg.Level = zap.NewAtomicLevelAt(zapcore.ErrorLevel) + case LLDisabled: + cfg.Level = zap.NewAtomicLevelAt(zapcore.FatalLevel) + } + + return cfg +} + +func singleton(set Settings) *zap.SugaredLogger { if loggerton != nil { return loggerton } @@ -287,7 +363,10 @@ func singleton(level logLevel, logfile string) *zap.SugaredLogger { return loggerton } - logCore, loggerton = genLogger(level, logfile) + set = set.EnsureDefaults() + setCluesSecretsHash(set.PIIHandling) + + logCore, loggerton = genLogger(set) return loggerton } @@ -305,18 +384,11 @@ const ctxKey loggingKey = "corsoLogger" // cobra. This early parsing is necessary since logging depends on // a seeded context prior to cobra evaluating flags. func Seed(ctx context.Context, set Settings) (context.Context, *zap.SugaredLogger) { - if len(set.Level) == 0 { - set.Level = LLInfo - } - - setCluesSecretsHash(set.PIIHandling) - - zsl := singleton(levelOf(set.Level), set.File) - + zsl := singleton(set) return Set(ctx, zsl), zsl } -func setCluesSecretsHash(alg string) { +func setCluesSecretsHash(alg piiAlg) { switch alg { case PIIHash: // TODO: a persistent hmac key for each tenant would be nice @@ -329,18 +401,12 @@ func setCluesSecretsHash(alg string) { } } -// SeedLevel generates a logger within the context with the given log-level. -func SeedLevel(ctx context.Context, level logLevel) (context.Context, *zap.SugaredLogger) { +// CtxOrSeed attempts to retrieve the logger from the ctx. If not found, it +// generates a logger with the given settings and adds it to the context. +func CtxOrSeed(ctx context.Context, set Settings) (context.Context, *zap.SugaredLogger) { l := ctx.Value(ctxKey) if l == nil { - logfile := os.Getenv("CORSO_LOG_FILE") - - if len(logfile) == 0 { - logfile = defaultLogLocation() - } - - zsl := singleton(level, logfile) - + zsl := singleton(set) return Set(ctx, zsl), zsl } @@ -360,7 +426,7 @@ func Set(ctx context.Context, logger *zap.SugaredLogger) context.Context { func Ctx(ctx context.Context) *zap.SugaredLogger { l := ctx.Value(ctxKey) if l == nil { - return singleton(levelOf(LogLevelFV), defaultLogLocation()) + l = singleton(Settings{}.EnsureDefaults()) } return l.(*zap.SugaredLogger).With(clues.In(ctx).Slice()...) @@ -376,22 +442,6 @@ func CtxErr(ctx context.Context, err error) *zap.SugaredLogger { With(clues.InErr(err).Slice()...) } -// transforms the llevel flag value to a logLevel enum -func levelOf(lvl string) logLevel { - switch lvl { - case LLDebug: - return Development - case LLWarn: - return Warn - case LLError: - return Production - case LLDisabled: - return Disabled - } - - return Info -} - // Flush writes out all buffered logs. 
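Editorial aside, not part of the change above: a minimal sketch of how the reworked logger settings are meant to flow end to end. It relies only on functions visible in this diff (PreloadLoggingFlags, Settings.EnsureDefaults, Seed, Flush) and on the flag names it introduces; the main() scaffolding and the sample log line are illustrative assumptions, not code from the repository.

package main

import (
	"context"
	"os"

	"github.com/alcionai/corso/src/pkg/logger"
)

func main() {
	// Parse logging flags ahead of cobra so the logger can be seeded early;
	// --log-format and --mask-sensitive-data are the flags introduced in this change.
	set := logger.PreloadLoggingFlags(os.Args[1:])

	// EnsureDefaults fills any unset or invalid values (level, format, pii
	// handling, file). Seed also applies it internally via the singleton,
	// so this call is shown only to make the flow explicit.
	set = set.EnsureDefaults()

	// Seed stores the sugared logger in the context for later retrieval via logger.Ctx.
	ctx, log := logger.Seed(context.Background(), set)
	defer logger.Flush(ctx)

	log.Infow("logger ready", "level", set.Level, "format", set.Format)
}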
func Flush(ctx context.Context) { _ = Ctx(ctx).Sync() @@ -403,7 +453,6 @@ func Flush(ctx context.Context) { type wrapper struct { zap.SugaredLogger - forceDebugLogLevel bool } diff --git a/src/pkg/logger/logger_test.go b/src/pkg/logger/logger_test.go index 7cb7926fa..910b546b0 100644 --- a/src/pkg/logger/logger_test.go +++ b/src/pkg/logger/logger_test.go @@ -32,8 +32,9 @@ func (suite *LoggerUnitSuite) TestAddLoggingFlags() { Run: func(cmd *cobra.Command, args []string) { assert.True(t, logger.DebugAPIFV, logger.DebugAPIFN) assert.True(t, logger.ReadableLogsFV, logger.ReadableLogsFN) - assert.Equal(t, logger.LLError, logger.LogLevelFV, logger.LogLevelFN) - assert.Equal(t, logger.PIIMask, logger.SensitiveInfoFV, logger.SensitiveInfoFN) + assert.Equal(t, string(logger.LLError), logger.LogLevelFV, logger.LogLevelFN) + assert.Equal(t, string(logger.LFText), logger.LogFormatFV, logger.LogFormatFN) + assert.True(t, logger.MaskSensitiveDataFV, logger.MaskSensitiveDataFN) // empty assertion here, instead of matching "log-file", because the LogFile // var isn't updated by running the command (this is expected and correct), // while the logFileFV remains unexported. @@ -48,9 +49,10 @@ func (suite *LoggerUnitSuite) TestAddLoggingFlags() { "test", "--" + logger.DebugAPIFN, "--" + logger.LogFileFN, "log-file", - "--" + logger.LogLevelFN, logger.LLError, + "--" + logger.LogLevelFN, string(logger.LLError), + "--" + logger.LogFormatFN, string(logger.LFText), "--" + logger.ReadableLogsFN, - "--" + logger.SensitiveInfoFN, logger.PIIMask, + "--" + logger.MaskSensitiveDataFN, }) err := cmd.Execute() @@ -66,9 +68,10 @@ func (suite *LoggerUnitSuite) TestPreloadLoggingFlags() { args := []string{ "--" + logger.DebugAPIFN, "--" + logger.LogFileFN, "log-file", - "--" + logger.LogLevelFN, logger.LLError, + "--" + logger.LogLevelFN, string(logger.LLError), + "--" + logger.LogFormatFN, string(logger.LFText), "--" + logger.ReadableLogsFN, - "--" + logger.SensitiveInfoFN, logger.PIIMask, + "--" + logger.MaskSensitiveDataFN, } settings := logger.PreloadLoggingFlags(args) @@ -77,5 +80,44 @@ func (suite *LoggerUnitSuite) TestPreloadLoggingFlags() { assert.True(t, logger.ReadableLogsFV, logger.ReadableLogsFN) assert.Equal(t, "log-file", settings.File, "settings.File") assert.Equal(t, logger.LLError, settings.Level, "settings.Level") - assert.Equal(t, logger.PIIMask, settings.PIIHandling, "settings.PIIHandling") + assert.Equal(t, logger.LFText, settings.Format, "settings.Format") + assert.Equal(t, logger.PIIHash, settings.PIIHandling, "settings.PIIHandling") +} + +func (suite *LoggerUnitSuite) TestPreloadLoggingFlags_badArgsEnsureDefault() { + t := suite.T() + + logger.DebugAPIFV = false + logger.ReadableLogsFV = false + + args := []string{ + "--" + logger.DebugAPIFN, + "--" + logger.LogFileFN, "log-file", + "--" + logger.LogLevelFN, "not-a-level", + "--" + logger.LogFormatFN, "not-a-format", + "--" + logger.ReadableLogsFN, + "--" + logger.MaskSensitiveDataFN, + } + + settings := logger.PreloadLoggingFlags(args) + settings = settings.EnsureDefaults() + + assert.Equal(t, logger.LLInfo, settings.Level, "settings.Level") + assert.Equal(t, logger.LFText, settings.Format, "settings.Format") +} + +func (suite *LoggerUnitSuite) TestSettings_ensureDefaults() { + t := suite.T() + + s := logger.Settings{} + require.Empty(t, s.File, "file") + require.Empty(t, s.Level, "level") + require.Empty(t, s.Format, "format") + require.Empty(t, s.PIIHandling, "piialg") + + s = s.EnsureDefaults() + require.NotEmpty(t, s.File, "file") + 
require.NotEmpty(t, s.Level, "level") + require.NotEmpty(t, s.Format, "format") + require.NotEmpty(t, s.PIIHandling, "piialg") } diff --git a/src/pkg/path/onedrive.go b/src/pkg/path/drive.go similarity index 60% rename from src/pkg/path/onedrive.go rename to src/pkg/path/drive.go index 48c443311..033f9934b 100644 --- a/src/pkg/path/onedrive.go +++ b/src/pkg/path/drive.go @@ -8,19 +8,21 @@ import "github.com/alcionai/clues" // // driveID is `b!X_8Z2zuXpkKkXZsr7gThk9oJpuj0yXVGnK5_VjRRPK-q725SX_8ZQJgFDK8PlFxA` and // folders[] is []{"Folder1", "Folder2"} +// +// Should be compatible with all drive-based services (ex: oneDrive, sharePoint Libraries, etc) type DrivePath struct { DriveID string Root string Folders Elements } -func ToOneDrivePath(p Path) (*DrivePath, error) { +func ToDrivePath(p Path) (*DrivePath, error) { folders := p.Folders() // Must be at least `drives//root:` if len(folders) < 3 { return nil, clues. - New("folder path doesn't match expected format for OneDrive items"). + New("folder path doesn't match expected format for Drive items"). With("path_folders", p.Folder(false)) } @@ -29,10 +31,20 @@ func ToOneDrivePath(p Path) (*DrivePath, error) { // Returns the path to the folder within the drive (i.e. under `root:`) func GetDriveFolderPath(p Path) (string, error) { - drivePath, err := ToOneDrivePath(p) + drivePath, err := ToDrivePath(p) if err != nil { return "", err } return Builder{}.Append(drivePath.Folders...).String(), nil } + +// BuildDriveLocation takes a driveID and a set of unescaped element names, +// including the root folder, and returns a *path.Builder containing the +// canonical path representation for the drive path. +func BuildDriveLocation( + driveID string, + unescapedElements ...string, +) *Builder { + return Builder{}.Append("drives", driveID).Append(unescapedElements...) 
+} diff --git a/src/pkg/path/drive_test.go b/src/pkg/path/drive_test.go new file mode 100644 index 000000000..5a6853caf --- /dev/null +++ b/src/pkg/path/drive_test.go @@ -0,0 +1,119 @@ +package path_test + +import ( + "strings" + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/path" +) + +type OneDrivePathSuite struct { + tester.Suite +} + +func TestOneDrivePathSuite(t *testing.T) { + suite.Run(t, &OneDrivePathSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *OneDrivePathSuite) Test_ToOneDrivePath() { + tests := []struct { + name string + pathElements []string + expected *path.DrivePath + errCheck assert.ErrorAssertionFunc + }{ + { + name: "Not enough path elements", + pathElements: []string{odConsts.DrivesPathDir, "driveID"}, + errCheck: assert.Error, + }, + { + name: "Root path", + pathElements: []string{odConsts.DrivesPathDir, "driveID", odConsts.RootPathDir}, + expected: &path.DrivePath{ + DriveID: "driveID", + Root: odConsts.RootPathDir, + Folders: []string{}, + }, + errCheck: assert.NoError, + }, + { + name: "Deeper path", + pathElements: []string{odConsts.DrivesPathDir, "driveID", odConsts.RootPathDir, "folder1", "folder2"}, + expected: &path.DrivePath{ + DriveID: "driveID", + Root: odConsts.RootPathDir, + Folders: []string{"folder1", "folder2"}, + }, + errCheck: assert.NoError, + }, + } + for _, tt := range tests { + suite.Run(tt.name, func() { + t := suite.T() + + p, err := path.Build("tenant", "user", path.OneDriveService, path.FilesCategory, false, tt.pathElements...) + require.NoError(suite.T(), err, clues.ToCore(err)) + + got, err := path.ToDrivePath(p) + tt.errCheck(t, err) + if err != nil { + return + } + assert.Equal(suite.T(), tt.expected, got) + }) + } +} + +func (suite *OneDrivePathSuite) TestFormatDriveFolders() { + const ( + driveID = "some-drive-id" + drivePrefix = "drives/" + driveID + ) + + table := []struct { + name string + input []string + expected string + }{ + { + name: "normal", + input: []string{ + "root:", + "foo", + "bar", + }, + expected: strings.Join( + append([]string{drivePrefix}, "root:", "foo", "bar"), + "/"), + }, + { + name: "has character that would be escaped", + input: []string{ + "root:", + "foo/", + "bar", + }, + // Element "foo/" should end up escaped in the string output. 
+ expected: strings.Join( + append([]string{drivePrefix}, "root:", `foo\/`, "bar"), + "/"), + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + assert.Equal( + suite.T(), + test.expected, + path.BuildDriveLocation(driveID, test.input...).String()) + }) + } +} diff --git a/src/pkg/path/elements.go b/src/pkg/path/elements.go index 0a55bd8e4..d1ca932dc 100644 --- a/src/pkg/path/elements.go +++ b/src/pkg/path/elements.go @@ -2,6 +2,7 @@ package path import ( "fmt" + "strings" "github.com/alcionai/clues" @@ -28,7 +29,26 @@ var piiSafePathElems = pii.MapWithPlurals( LibrariesCategory.String(), PagesCategory.String(), DetailsCategory.String(), -) + + // well known folders + // https://learn.microsoft.com/en-us/graph/api/resources/mailfolder?view=graph-rest-1.0 + "archive", + "clutter", + "conflict", + "conversationhistory", + "deleteditem", + "draft", + "inbox", + "junkemail", + "localfailure", + "msgfolderroot", + "outbox", + "recoverableitemsdeletion", + "scheduled", + "searchfolder", + "sentitem", + "serverfailure", + "syncissue") var ( // interface compliance required for handling PII @@ -86,3 +106,33 @@ func (el Elements) String() string { func (el Elements) PlainString() string { return join(el) } + +// Last returns the last element. Returns "" if empty. +func (el Elements) Last() string { + if len(el) == 0 { + return "" + } + + return el[len(el)-1] +} + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +// LoggableDir takes in a path reference (of any structure) and conceals any +// non-standard elements (ids, filenames, foldernames, etc). +func LoggableDir(ref string) string { + r := ref + n := strings.TrimSuffix(r, string(PathSeparator)) + + for n != r { + r = n + n = strings.TrimSuffix(r, string(PathSeparator)) + } + + elems := Split(r) + elems = pii.ConcealElements(elems, piiSafePathElems) + + return join(elems) +} diff --git a/src/pkg/path/elements_test.go b/src/pkg/path/elements_test.go index f9f4c1d1a..dbbb572ba 100644 --- a/src/pkg/path/elements_test.go +++ b/src/pkg/path/elements_test.go @@ -98,3 +98,40 @@ func (suite *ElementsUnitSuite) TestElements_piiHandling() { }) } } + +func (suite *ElementsUnitSuite) TestLoggableDir() { + table := []struct { + inpt string + expect string + }{ + { + inpt: "archive/clutter", + expect: "archive/clutter", + }, + { + inpt: "foo/bar", + expect: "***/***", + }, + { + inpt: "inbox/foo", + expect: "inbox/***", + }, + { + inpt: "foo/", + expect: "***", + }, + { + inpt: "foo//", + expect: "***", + }, + { + inpt: "foo///", + expect: "***", + }, + } + for _, test := range table { + suite.Run(test.inpt, func() { + assert.Equal(suite.T(), test.expect, LoggableDir(test.inpt)) + }) + } +} diff --git a/src/pkg/path/onedrive_test.go b/src/pkg/path/onedrive_test.go deleted file mode 100644 index d81c59e31..000000000 --- a/src/pkg/path/onedrive_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package path_test - -import ( - "testing" - - "github.com/alcionai/clues" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/alcionai/corso/src/internal/tester" - "github.com/alcionai/corso/src/pkg/path" -) - -type OneDrivePathSuite struct { - tester.Suite -} - -func TestOneDrivePathSuite(t *testing.T) { - suite.Run(t, &OneDrivePathSuite{Suite: tester.NewUnitSuite(t)}) -} - -func (suite *OneDrivePathSuite) Test_ToOneDrivePath() { - const root = "root:" - - 
tests := []struct { - name string - pathElements []string - expected *path.DrivePath - errCheck assert.ErrorAssertionFunc - }{ - { - name: "Not enough path elements", - pathElements: []string{"drive", "driveID"}, - errCheck: assert.Error, - }, - { - name: "Root path", - pathElements: []string{"drive", "driveID", root}, - expected: &path.DrivePath{DriveID: "driveID", Root: root, Folders: []string{}}, - errCheck: assert.NoError, - }, - { - name: "Deeper path", - pathElements: []string{"drive", "driveID", root, "folder1", "folder2"}, - expected: &path.DrivePath{DriveID: "driveID", Root: root, Folders: []string{"folder1", "folder2"}}, - errCheck: assert.NoError, - }, - } - for _, tt := range tests { - suite.Run(tt.name, func() { - t := suite.T() - - p, err := path.Build("tenant", "user", path.OneDriveService, path.FilesCategory, false, tt.pathElements...) - require.NoError(suite.T(), err, clues.ToCore(err)) - - got, err := path.ToOneDrivePath(p) - tt.errCheck(t, err) - if err != nil { - return - } - assert.Equal(suite.T(), tt.expected, got) - }) - } -} diff --git a/src/pkg/path/path.go b/src/pkg/path/path.go index 5e1cd9a03..189e24449 100644 --- a/src/pkg/path/path.go +++ b/src/pkg/path/path.go @@ -85,7 +85,7 @@ type Path interface { Category() CategoryType Tenant() string ResourceOwner() string - Folder(bool) string + Folder(escaped bool) string Folders() Elements Item() string // UpdateParent updates parent from old to new if the item/folder was @@ -106,7 +106,9 @@ type Path interface { // Append returns a new Path object with the given element added to the end of // the old Path if possible. If the old Path is an item Path then Append // returns an error. - Append(element string, isItem bool) (Path, error) + Append(isItem bool, elems ...string) (Path, error) + // AppendItem is a shorthand for Append(true, someItem) + AppendItem(item string) (Path, error) // ShortRef returns a short reference representing this path. The short // reference is guaranteed to be unique. No guarantees are made about whether // a short reference can be converted back into the Path that generated it. @@ -130,6 +132,13 @@ var ( _ fmt.Stringer = &Builder{} ) +// RestorePaths denotes the location to find an item in kopia and the path of +// the collection to place the item in for restore. +type RestorePaths struct { + StoragePath Path + RestorePath Path +} + // Builder is a simple path representation that only tracks path elements. It // can join, escape, and unescape elements. Higher-level packages are expected // to wrap this struct to build resource-specific contexts (e.x. an @@ -299,18 +308,27 @@ func (pb Builder) Elements() Elements { return append(Elements{}, pb.elements...) } -// verifyPrefix ensures that the tenant and resourceOwner are valid -// values, and that the builder has some directory structure. 
-func (pb Builder) verifyPrefix(tenant, resourceOwner string) error { +func ServicePrefix( + tenant, resourceOwner string, + s ServiceType, + c CategoryType, +) (Path, error) { + pb := Builder{} + + if err := ValidateServiceAndCategory(s, c); err != nil { + return nil, err + } + if err := verifyInputValues(tenant, resourceOwner); err != nil { - return err + return nil, err } - if len(pb.elements) == 0 { - return clues.New("missing path beyond prefix") - } - - return nil + return &dataLayerResourcePath{ + Builder: *pb.withPrefix(tenant, s.String(), resourceOwner, c.String()), + service: s, + category: c, + hasItem: false, + }, nil } // withPrefix creates a Builder prefixed with the parameter values, and @@ -740,3 +758,17 @@ func join(elements []string) string { // '\' according to the escaping rules. return strings.Join(elements, string(PathSeparator)) } + +// verifyPrefix ensures that the tenant and resourceOwner are valid +// values, and that the builder has some directory structure. +func (pb Builder) verifyPrefix(tenant, resourceOwner string) error { + if err := verifyInputValues(tenant, resourceOwner); err != nil { + return err + } + + if len(pb.elements) == 0 { + return clues.New("missing path beyond prefix") + } + + return nil +} diff --git a/src/pkg/path/path_test.go b/src/pkg/path/path_test.go index 6af2b2b0e..be43d3732 100644 --- a/src/pkg/path/path_test.go +++ b/src/pkg/path/path_test.go @@ -245,6 +245,26 @@ func (suite *PathUnitSuite) TestAppend() { } } +func (suite *PathUnitSuite) TestAppendItem() { + t := suite.T() + + p, err := Build("t", "ro", ExchangeService, EmailCategory, false, "foo", "bar") + require.NoError(t, err, clues.ToCore(err)) + + pb := p.ToBuilder() + assert.Equal(t, pb.String(), p.String()) + + pb = pb.Append("qux") + + p, err = p.AppendItem("qux") + + require.NoError(t, err, clues.ToCore(err)) + assert.Equal(t, pb.String(), p.String()) + + _, err = p.AppendItem("fnords") + require.Error(t, err, clues.ToCore(err)) +} + func (suite *PathUnitSuite) TestUnescapeAndAppend() { table := append(append([]testData{}, genericCases...), basicEscapedInputs...) 
for _, test := range table { @@ -749,3 +769,67 @@ func (suite *PathUnitSuite) TestPath_piiHandling() { }) } } + +func (suite *PathUnitSuite) TestToServicePrefix() { + table := []struct { + name string + service ServiceType + category CategoryType + tenant string + owner string + expect string + expectErr require.ErrorAssertionFunc + }{ + { + name: "ok", + service: ExchangeService, + category: ContactsCategory, + tenant: "t", + owner: "ro", + expect: join([]string{"t", ExchangeService.String(), "ro", ContactsCategory.String()}), + expectErr: require.NoError, + }, + { + name: "bad category", + service: ExchangeService, + category: FilesCategory, + tenant: "t", + owner: "ro", + expectErr: require.Error, + }, + { + name: "bad tenant", + service: ExchangeService, + category: ContactsCategory, + tenant: "", + owner: "ro", + expectErr: require.Error, + }, + { + name: "bad owner", + service: ExchangeService, + category: ContactsCategory, + tenant: "t", + owner: "", + expectErr: require.Error, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + r, err := ServicePrefix(test.tenant, test.owner, test.service, test.category) + test.expectErr(t, err, clues.ToCore(err)) + + if r == nil { + return + } + + assert.Equal(t, test.expect, r.String()) + assert.NotPanics(t, func() { + r.Folders() + r.Item() + }, "runs Folders() and Item()") + }) + } +} diff --git a/src/pkg/path/resource_path.go b/src/pkg/path/resource_path.go index 47d481a46..923d66453 100644 --- a/src/pkg/path/resource_path.go +++ b/src/pkg/path/resource_path.go @@ -253,21 +253,25 @@ func (rp dataLayerResourcePath) Dir() (Path, error) { } func (rp dataLayerResourcePath) Append( - element string, isItem bool, + elems ...string, ) (Path, error) { if rp.hasItem { return nil, clues.New("appending to an item path") } return &dataLayerResourcePath{ - Builder: *rp.Builder.Append(element), + Builder: *rp.Builder.Append(elems...), service: rp.service, category: rp.category, hasItem: isItem, }, nil } +func (rp dataLayerResourcePath) AppendItem(item string) (Path, error) { + return rp.Append(true, item) +} + func (rp dataLayerResourcePath) ToBuilder() *Builder { // Safe to directly return the Builder because Builders are immutable. return &rp.Builder diff --git a/src/pkg/path/resource_path_test.go b/src/pkg/path/resource_path_test.go index 3453737e6..e49f797e2 100644 --- a/src/pkg/path/resource_path_test.go +++ b/src/pkg/path/resource_path_test.go @@ -547,7 +547,7 @@ func (suite *PopulatedDataLayerResourcePath) TestAppend() { suite.Run(test.name, func() { t := suite.T() - newPath, err := suite.paths[m.isItem].Append(newElement, test.hasItem) + newPath, err := suite.paths[m.isItem].Append(test.hasItem, newElement) // Items don't allow appending. 
if m.isItem { diff --git a/src/pkg/repository/loadtest/repository_load_test.go b/src/pkg/repository/loadtest/repository_load_test.go index 226b6e15d..7ef56fdb0 100644 --- a/src/pkg/repository/loadtest/repository_load_test.go +++ b/src/pkg/repository/loadtest/repository_load_test.go @@ -24,6 +24,7 @@ import ( "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" + selTD "github.com/alcionai/corso/src/pkg/selectors/testdata" "github.com/alcionai/corso/src/pkg/storage" ) @@ -150,7 +151,7 @@ func runRestoreLoadTest( t.Skip("restore load test is toggled off") } - dest := tester.DefaultTestRestoreDestination() + dest := tester.DefaultTestRestoreDestination("") rst, err := r.NewRestore(ctx, backupID, restSel, dest) require.NoError(t, err, clues.ToCore(err)) @@ -302,10 +303,10 @@ func doRestoreLoadTest( } // noFolders removes all "folder" category details entries -func noFolders(t *testing.T, des []details.DetailsEntry) []details.DetailsEntry { +func noFolders(t *testing.T, des []details.Entry) []details.Entry { t.Helper() - sansfldr := []details.DetailsEntry{} + sansfldr := []details.Entry{} for _, ent := range des { if ent.Folder == nil { @@ -541,7 +542,7 @@ func (suite *LoadOneDriveSuite) TestOneDrive() { defer flush() bsel := selectors.NewOneDriveBackup(suite.usersUnderTest) - bsel.Include(bsel.AllData()) + bsel.Include(selTD.OneDriveBackupFolderScope(bsel)) sel := bsel.Selector runLoadTest( @@ -588,7 +589,7 @@ func (suite *IndividualLoadOneDriveSuite) TestOneDrive() { defer flush() bsel := selectors.NewOneDriveBackup(suite.usersUnderTest) - bsel.Include(bsel.AllData()) + bsel.Include(selTD.OneDriveBackupFolderScope(bsel)) sel := bsel.Selector runLoadTest( diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index b18488d3f..c3f191b96 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -8,8 +8,8 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" - "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/common/crash" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/connector" "github.com/alcionai/corso/src/internal/connector/onedrive/metadata" "github.com/alcionai/corso/src/internal/data" @@ -24,9 +24,9 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + rep "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" - "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/storage" "github.com/alcionai/corso/src/pkg/store" @@ -63,7 +63,7 @@ type Repository interface { NewBackupWithLookup( ctx context.Context, self selectors.Selector, - ins common.IDNameSwapper, + ins idname.Cacher, ) (operations.BackupOperation, error) NewRestore( ctx context.Context, @@ -71,6 +71,10 @@ type Repository interface { sel selectors.Selector, dest control.RestoreDestination, ) (operations.RestoreOperation, error) + NewMaintenance( + ctx context.Context, + mOpts rep.Maintenance, + ) (operations.MaintenanceOperation, error) DeleteBackup(ctx context.Context, id string) error BackupGetter } @@ -115,13 +119,13 @@ func Initialize( "storage_provider", s.Provider.String()) defer func() { - if crErr := crash.Recovery(ctx, recover()); crErr != nil { + if 
crErr := crash.Recovery(ctx, recover(), "repo init"); crErr != nil { err = crErr } }() kopiaRef := kopia.NewConn(s) - if err := kopiaRef.Initialize(ctx); err != nil { + if err := kopiaRef.Initialize(ctx, opts.Repo); err != nil { // replace common internal errors so that sdk users can check results with errors.Is() if errors.Is(err, kopia.ErrorRepoAlreadyExists) { return nil, clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx) @@ -189,7 +193,7 @@ func Connect( "storage_provider", s.Provider.String()) defer func() { - if crErr := crash.Recovery(ctx, recover()); crErr != nil { + if crErr := crash.Recovery(ctx, recover(), "repo connect"); crErr != nil { err = crErr } }() @@ -203,7 +207,7 @@ func Connect( defer close(complete) kopiaRef := kopia.NewConn(s) - if err := kopiaRef.Connect(ctx); err != nil { + if err := kopiaRef.Connect(ctx, opts.Repo); err != nil { return nil, clues.Wrap(err, "connecting kopia client") } // kopiaRef comes with a count of 1 and NewWrapper/NewModelStore bumps it again so safe @@ -306,9 +310,9 @@ func (r repository) NewBackup( func (r repository) NewBackupWithLookup( ctx context.Context, sel selectors.Selector, - ins common.IDNameSwapper, + ins idname.Cacher, ) (operations.BackupOperation, error) { - gc, err := connectToM365(ctx, sel, r.Account, fault.New(true)) + gc, err := connectToM365(ctx, sel, r.Account) if err != nil { return operations.BackupOperation{}, errors.Wrap(err, "connecting to m365") } @@ -318,11 +322,6 @@ func (r repository) NewBackupWithLookup( return operations.BackupOperation{}, errors.Wrap(err, "resolving resource owner details") } - // Exchange and OneDrive need to maintain the user PN as the ID until we're ready to migrate - if sel.PathService() != path.SharePointService { - ownerID = ownerName - } - // TODO: retrieve display name from gc sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName) @@ -334,7 +333,7 @@ func (r repository) NewBackupWithLookup( gc, r.Account, sel, - sel, + sel, // the selector acts as an IDNamer for its discrete resource owner. r.Bus) } @@ -345,7 +344,7 @@ func (r repository) NewRestore( sel selectors.Selector, dest control.RestoreDestination, ) (operations.RestoreOperation, error) { - gc, err := connectToM365(ctx, sel, r.Account, fault.New(true)) + gc, err := connectToM365(ctx, sel, r.Account) if err != nil { return operations.RestoreOperation{}, errors.Wrap(err, "connecting to m365") } @@ -363,6 +362,18 @@ func (r repository) NewRestore( r.Bus) } +func (r repository) NewMaintenance( + ctx context.Context, + mOpts rep.Maintenance, +) (operations.MaintenanceOperation, error) { + return operations.NewMaintenanceOperation( + ctx, + r.Opts, + r.dataLayer, + mOpts, + r.Bus) +} + // Backup retrieves a backup by id. 
func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) { return getBackup(ctx, id, store.NewKopiaStore(r.modelStore)) @@ -555,10 +566,6 @@ func deleteBackup( return errWrapper(err) } - if err := kw.DeleteSnapshot(ctx, b.SnapshotID); err != nil { - return err - } - if len(b.SnapshotID) > 0 { if err := kw.DeleteSnapshot(ctx, b.SnapshotID); err != nil { return err @@ -627,7 +634,6 @@ func connectToM365( ctx context.Context, sel selectors.Selector, acct account.Account, - errs *fault.Bus, ) (*connector.GraphConnector, error) { complete, closer := observe.MessageWithCompletion(ctx, "Connecting to M365") defer func() { @@ -642,7 +648,7 @@ func connectToM365( resource = connector.Sites } - gc, err := connector.NewGraphConnector(ctx, acct, resource, errs) + gc, err := connector.NewGraphConnector(ctx, acct, resource) if err != nil { return nil, err } diff --git a/src/pkg/repository/repository_test.go b/src/pkg/repository/repository_test.go index 3d6c9979f..8efe44f31 100644 --- a/src/pkg/repository/repository_test.go +++ b/src/pkg/repository/repository_test.go @@ -1,7 +1,9 @@ package repository_test import ( + "os" "testing" + "time" "github.com/alcionai/clues" "github.com/stretchr/testify/assert" @@ -11,6 +13,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" + rep "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/storage" @@ -54,7 +57,7 @@ func (suite *RepositoryUnitSuite) TestInitialize() { st, err := test.storage() assert.NoError(t, err, clues.ToCore(err)) - _, err = repository.Initialize(ctx, test.account, st, control.Options{}) + _, err = repository.Initialize(ctx, test.account, st, control.Defaults()) test.errCheck(t, err, clues.ToCore(err)) }) } @@ -88,7 +91,7 @@ func (suite *RepositoryUnitSuite) TestConnect() { st, err := test.storage() assert.NoError(t, err, clues.ToCore(err)) - _, err = repository.Connect(ctx, test.account, st, control.Options{}) + _, err = repository.Connect(ctx, test.account, st, control.Defaults()) test.errCheck(t, err, clues.ToCore(err)) }) } @@ -131,7 +134,7 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() { t := suite.T() st := test.storage(t) - r, err := repository.Initialize(ctx, test.account, st, control.Options{}) + r, err := repository.Initialize(ctx, test.account, st, control.Defaults()) if err == nil { defer func() { err := r.Close(ctx) @@ -144,6 +147,33 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() { } } +const ( + roleARNEnvKey = "CORSO_TEST_S3_ROLE" + roleDuration = time.Minute * 20 +) + +func (suite *RepositoryIntegrationSuite) TestInitializeWithRole() { + if _, ok := os.LookupEnv(roleARNEnvKey); !ok { + suite.T().Skip(roleARNEnvKey + " not set") + } + + ctx, flush := tester.NewContext() + defer flush() + + st := tester.NewPrefixedS3Storage(suite.T()) + + st.Role = os.Getenv(roleARNEnvKey) + st.SessionName = "corso-repository-test" + st.SessionDuration = roleDuration.String() + + r, err := repository.Initialize(ctx, account.Account{}, st, control.Options{}) + require.NoError(suite.T(), err) + + defer func() { + r.Close(ctx) + }() +} + func (suite *RepositoryIntegrationSuite) TestConnect() { ctx, flush := tester.NewContext() defer flush() @@ -153,11 +183,11 @@ func (suite *RepositoryIntegrationSuite) TestConnect() { // need to initialize the repository before we can test 
connecting to it. st := tester.NewPrefixedS3Storage(t) - _, err := repository.Initialize(ctx, account.Account{}, st, control.Options{}) + _, err := repository.Initialize(ctx, account.Account{}, st, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) // now re-connect - _, err = repository.Connect(ctx, account.Account{}, st, control.Options{}) + _, err = repository.Connect(ctx, account.Account{}, st, control.Defaults()) assert.NoError(t, err, clues.ToCore(err)) } @@ -170,7 +200,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { // need to initialize the repository before we can test connecting to it. st := tester.NewPrefixedS3Storage(t) - r, err := repository.Initialize(ctx, account.Account{}, st, control.Options{}) + r, err := repository.Initialize(ctx, account.Account{}, st, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) oldID := r.GetID() @@ -179,7 +209,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { require.NoError(t, err, clues.ToCore(err)) // now re-connect - r, err = repository.Connect(ctx, account.Account{}, st, control.Options{}) + r, err = repository.Connect(ctx, account.Account{}, st, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, oldID, r.GetID()) } @@ -195,7 +225,7 @@ func (suite *RepositoryIntegrationSuite) TestNewBackup() { // need to initialize the repository before we can test connecting to it. st := tester.NewPrefixedS3Storage(t) - r, err := repository.Initialize(ctx, acct, st, control.Options{}) + r, err := repository.Initialize(ctx, acct, st, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) userID := tester.M365UserID(t) @@ -212,12 +242,12 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() { t := suite.T() acct := tester.NewM365Account(t) - dest := tester.DefaultTestRestoreDestination() + dest := tester.DefaultTestRestoreDestination("") // need to initialize the repository before we can test connecting to it. st := tester.NewPrefixedS3Storage(t) - r, err := repository.Initialize(ctx, acct, st, control.Options{}) + r, err := repository.Initialize(ctx, acct, st, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) ro, err := r.NewRestore(ctx, "backup-id", selectors.Selector{DiscreteOwner: "test"}, dest) @@ -225,6 +255,25 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() { require.NotNil(t, ro) } +func (suite *RepositoryIntegrationSuite) TestNewMaintenance() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + acct := tester.NewM365Account(t) + + // need to initialize the repository before we can test connecting to it. + st := tester.NewPrefixedS3Storage(t) + + r, err := repository.Initialize(ctx, acct, st, control.Defaults()) + require.NoError(t, err, clues.ToCore(err)) + + mo, err := r.NewMaintenance(ctx, rep.Maintenance{}) + require.NoError(t, err, clues.ToCore(err)) + require.NotNil(t, mo) +} + func (suite *RepositoryIntegrationSuite) TestConnect_DisableMetrics() { ctx, flush := tester.NewContext() defer flush() @@ -234,7 +283,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_DisableMetrics() { // need to initialize the repository before we can test connecting to it. 
st := tester.NewPrefixedS3Storage(t) - _, err := repository.Initialize(ctx, account.Account{}, st, control.Options{}) + _, err := repository.Initialize(ctx, account.Account{}, st, control.Defaults()) require.NoError(t, err) // now re-connect diff --git a/src/pkg/repository/repository_unexported_test.go b/src/pkg/repository/repository_unexported_test.go index e29350f6e..6860f6250 100644 --- a/src/pkg/repository/repository_unexported_test.go +++ b/src/pkg/repository/repository_unexported_test.go @@ -17,8 +17,10 @@ import ( "github.com/alcionai/corso/src/internal/stats" "github.com/alcionai/corso/src/internal/streamstore" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + rep "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" @@ -111,6 +113,10 @@ func (suite *RepositoryBackupsUnitSuite) TestDeleteBackup() { }, } + bupNoSnapshot := &backup.Backup{ + BaseModel: model.BaseModel{}, + } + table := []struct { name string sw mock.BackupWrapper @@ -171,6 +177,19 @@ func (suite *RepositoryBackupsUnitSuite) TestDeleteBackup() { }, expectID: bup.ID, }, + { + name: "no snapshot present", + sw: mock.BackupWrapper{ + Backup: bupNoSnapshot, + GetErr: nil, + DeleteErr: nil, + }, + kw: mockSSDeleter{assert.AnError}, + expectErr: func(t *testing.T, result error) { + assert.NoError(t, result, clues.ToCore(result)) + }, + expectID: bupNoSnapshot.ID, + }, } for _, test := range table { suite.Run(test.name, func() { @@ -179,7 +198,7 @@ func (suite *RepositoryBackupsUnitSuite) TestDeleteBackup() { t := suite.T() - err := deleteBackup(ctx, string(bup.ID), test.kw, test.sw) + err := deleteBackup(ctx, string(test.sw.Backup.ID), test.kw, test.sw) test.expectErr(t, err) }) } @@ -218,10 +237,10 @@ func (suite *RepositoryModelIntgSuite) SetupSuite() { require.NotNil(t, k) - err = k.Initialize(ctx) + err = k.Initialize(ctx, rep.Options{}) require.NoError(t, err, clues.ToCore(err)) - err = k.Connect(ctx) + err = k.Connect(ctx, rep.Options{}) require.NoError(t, err, clues.ToCore(err)) suite.kopiaCloser = func(ctx context.Context) { @@ -268,8 +287,8 @@ func (suite *RepositoryModelIntgSuite) TestGetRepositoryModel() { k = kopia.NewConn(s) ) - require.NoError(t, k.Initialize(ctx)) - require.NoError(t, k.Connect(ctx)) + require.NoError(t, k.Initialize(ctx, rep.Options{})) + require.NoError(t, k.Connect(ctx, rep.Options{})) defer k.Close(ctx) @@ -316,6 +335,7 @@ func writeBackup( b := backup.New( snapID, ssid, operations.Completed.String(), + version.Backup, model.StableID(backupID), sel, ownerID, ownerName, @@ -414,8 +434,8 @@ func (suite *RepositoryModelIntgSuite) TestGetBackupErrors() { var ( err = clues.Wrap(assert.AnError, "wrap") cec = err.Core() - item = fault.FileErr(err, "file-id", "file-name", map[string]any{"foo": "bar"}) - skip = fault.FileSkip(fault.SkipMalware, "s-file-id", "s-file-name", map[string]any{"foo": "bar"}) + item = fault.FileErr(err, "ns", "file-id", "file-name", map[string]any{"foo": "bar"}) + skip = fault.FileSkip(fault.SkipMalware, "ns", "s-file-id", "s-file-name", map[string]any{"foo": "bar"}) info = details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeMail, diff --git a/src/pkg/selectors/example_selectors_test.go b/src/pkg/selectors/example_selectors_test.go index 2e3260748..b1215d79f 100644 --- 
a/src/pkg/selectors/example_selectors_test.go +++ b/src/pkg/selectors/example_selectors_test.go @@ -121,11 +121,12 @@ var ( ctxBG = context.Background() exampleDetails = &details.Details{ DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ { - RepoRef: "tID/exchange/your-user-id/email/example/itemID", - ShortRef: "xyz", - ItemRef: "123", + RepoRef: "tID/exchange/your-user-id/email/example/itemID", + LocationRef: "example", + ShortRef: "xyz", + ItemRef: "123", ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: details.ExchangeMail, diff --git a/src/pkg/selectors/exchange.go b/src/pkg/selectors/exchange.go index ccee7f948..008134559 100644 --- a/src/pkg/selectors/exchange.go +++ b/src/pkg/selectors/exchange.go @@ -7,7 +7,7 @@ import ( "github.com/alcionai/clues" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" @@ -216,9 +216,8 @@ func (s *exchange) Contacts(folders, contacts []string, opts ...option) []Exchan scopes = append( scopes, - makeScope[ExchangeScope](ExchangeContact, contacts). - set(ExchangeContactFolder, folders, opts...), - ) + makeScope[ExchangeScope](ExchangeContact, contacts, defaultItemOptions(s.Cfg)...). + set(ExchangeContactFolder, folders, opts...)) return scopes } @@ -236,8 +235,7 @@ func (s *exchange) ContactFolders(folders []string, opts ...option) []ExchangeSc scopes = append( scopes, - makeScope[ExchangeScope](ExchangeContactFolder, folders, os...), - ) + makeScope[ExchangeScope](ExchangeContactFolder, folders, os...)) return scopes } @@ -252,9 +250,8 @@ func (s *exchange) Events(calendars, events []string, opts ...option) []Exchange scopes = append( scopes, - makeScope[ExchangeScope](ExchangeEvent, events). - set(ExchangeEventCalendar, calendars, opts...), - ) + makeScope[ExchangeScope](ExchangeEvent, events, defaultItemOptions(s.Cfg)...). + set(ExchangeEventCalendar, calendars, opts...)) return scopes } @@ -273,8 +270,7 @@ func (s *exchange) EventCalendars(events []string, opts ...option) []ExchangeSco scopes = append( scopes, - makeScope[ExchangeScope](ExchangeEventCalendar, events, os...), - ) + makeScope[ExchangeScope](ExchangeEventCalendar, events, os...)) return scopes } @@ -289,9 +285,8 @@ func (s *exchange) Mails(folders, mails []string, opts ...option) []ExchangeScop scopes = append( scopes, - makeScope[ExchangeScope](ExchangeMail, mails). - set(ExchangeMailFolder, folders, opts...), - ) + makeScope[ExchangeScope](ExchangeMail, mails, defaultItemOptions(s.Cfg)...). 
+ set(ExchangeMailFolder, folders, opts...)) return scopes } @@ -309,8 +304,7 @@ func (s *exchange) MailFolders(folders []string, opts ...option) []ExchangeScope scopes = append( scopes, - makeScope[ExchangeScope](ExchangeMailFolder, folders, os...), - ) + makeScope[ExchangeScope](ExchangeMailFolder, folders, os...)) return scopes } @@ -326,8 +320,7 @@ func (s *exchange) AllData() []ExchangeScope { scopes = append(scopes, makeScope[ExchangeScope](ExchangeContactFolder, Any()), makeScope[ExchangeScope](ExchangeEventCalendar, Any()), - makeScope[ExchangeScope](ExchangeMailFolder, Any()), - ) + makeScope[ExchangeScope](ExchangeMailFolder, Any())) return scopes } @@ -593,7 +586,7 @@ func (ec exchangeCategory) isLeaf() bool { // => {exchMailFolder: mailFolder, exchMail: mailID} func (ec exchangeCategory) pathValues( repo path.Path, - ent details.DetailsEntry, + ent details.Entry, cfg Config, ) (map[categorizer][]string, error) { var folderCat, itemCat categorizer @@ -617,13 +610,25 @@ func (ec exchangeCategory) pathValues( item = repo.Item() } - result := map[categorizer][]string{ - folderCat: {repo.Folder(false)}, - itemCat: {item, ent.ShortRef}, + items := []string{ent.ShortRef, item} + + // only include the item ID when the user is NOT matching + // item names. Exchange data does not contain an item name, + // only an ID, and we don't want to mix up the two. + if cfg.OnlyMatchItemNames { + items = []string{ent.ShortRef} } - if len(ent.LocationRef) > 0 { - result[folderCat] = append(result[folderCat], ent.LocationRef) + // Will hit the if-condition when we're at a top-level folder, but we'll get + // the same result when we extract from the RepoRef. + folder := ent.LocationRef + if len(folder) == 0 { + folder = repo.Folder(true) + } + + result := map[categorizer][]string{ + folderCat: {folder}, + itemCat: items, } return result, nil @@ -773,7 +778,7 @@ func (s ExchangeScope) matchesInfo(dii details.ItemInfo) bool { case ExchangeInfoEventRecurs: i = strconv.FormatBool(info.EventRecurs) case ExchangeInfoEventStartsAfter, ExchangeInfoEventStartsBefore: - i = common.FormatTime(info.EventStart) + i = dttm.Format(info.EventStart) case ExchangeInfoEventSubject: i = info.Subject case ExchangeInfoMailSender: @@ -781,7 +786,7 @@ func (s ExchangeScope) matchesInfo(dii details.ItemInfo) bool { case ExchangeInfoMailSubject: i = info.Subject case ExchangeInfoMailReceivedAfter, ExchangeInfoMailReceivedBefore: - i = common.FormatTime(info.Received) + i = dttm.Format(info.Received) } return s.Matches(infoCat, i) diff --git a/src/pkg/selectors/exchange_test.go b/src/pkg/selectors/exchange_test.go index 703ea6f72..0473c522e 100644 --- a/src/pkg/selectors/exchange_test.go +++ b/src/pkg/selectors/exchange_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" @@ -642,25 +642,25 @@ func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesInfo() { {"mail with a different subject", details.ExchangeMail, es.MailSubject("fancy"), assert.False}, {"mail with the matching subject", details.ExchangeMail, es.MailSubject(subject), assert.True}, {"mail with a substring subject match", details.ExchangeMail, es.MailSubject(subject[5:9]), assert.True}, - {"mail received after the epoch", details.ExchangeMail, 
es.MailReceivedAfter(common.FormatTime(epoch)), assert.True}, - {"mail received after now", details.ExchangeMail, es.MailReceivedAfter(common.FormatTime(now)), assert.False}, + {"mail received after the epoch", details.ExchangeMail, es.MailReceivedAfter(dttm.Format(epoch)), assert.True}, + {"mail received after now", details.ExchangeMail, es.MailReceivedAfter(dttm.Format(now)), assert.False}, { "mail received after sometime later", details.ExchangeMail, - es.MailReceivedAfter(common.FormatTime(future)), + es.MailReceivedAfter(dttm.Format(future)), assert.False, }, { "mail received before the epoch", details.ExchangeMail, - es.MailReceivedBefore(common.FormatTime(epoch)), + es.MailReceivedBefore(dttm.Format(epoch)), assert.False, }, - {"mail received before now", details.ExchangeMail, es.MailReceivedBefore(common.FormatTime(now)), assert.False}, + {"mail received before now", details.ExchangeMail, es.MailReceivedBefore(dttm.Format(now)), assert.False}, { "mail received before sometime later", details.ExchangeMail, - es.MailReceivedBefore(common.FormatTime(future)), + es.MailReceivedBefore(dttm.Format(future)), assert.True, }, {"event with any organizer", details.ExchangeEvent, es.EventOrganizer(AnyTgt), assert.True}, @@ -669,25 +669,25 @@ func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesInfo() { {"event with the matching organizer", details.ExchangeEvent, es.EventOrganizer(organizer), assert.True}, {"event that recurs", details.ExchangeEvent, es.EventRecurs("true"), assert.True}, {"event that does not recur", details.ExchangeEvent, es.EventRecurs("false"), assert.False}, - {"event starting after the epoch", details.ExchangeEvent, es.EventStartsAfter(common.FormatTime(epoch)), assert.True}, - {"event starting after now", details.ExchangeEvent, es.EventStartsAfter(common.FormatTime(now)), assert.False}, + {"event starting after the epoch", details.ExchangeEvent, es.EventStartsAfter(dttm.Format(epoch)), assert.True}, + {"event starting after now", details.ExchangeEvent, es.EventStartsAfter(dttm.Format(now)), assert.False}, { "event starting after sometime later", details.ExchangeEvent, - es.EventStartsAfter(common.FormatTime(future)), + es.EventStartsAfter(dttm.Format(future)), assert.False, }, { "event starting before the epoch", details.ExchangeEvent, - es.EventStartsBefore(common.FormatTime(epoch)), + es.EventStartsBefore(dttm.Format(epoch)), assert.False, }, - {"event starting before now", details.ExchangeEvent, es.EventStartsBefore(common.FormatTime(now)), assert.False}, + {"event starting before now", details.ExchangeEvent, es.EventStartsBefore(dttm.Format(now)), assert.False}, { "event starting before sometime later", details.ExchangeEvent, - es.EventStartsBefore(common.FormatTime(future)), + es.EventStartsBefore(dttm.Format(future)), assert.True, }, {"event with any subject", details.ExchangeEvent, es.EventSubject(AnyTgt), assert.True}, @@ -713,9 +713,9 @@ func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesInfo() { func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesPath() { const ( usr = "userID" - fID1 = "mf_id_1" + fID1 = "mf_id_1.d" fld1 = "mailFolder" - fID2 = "mf_id_2" + fID2 = "mf_id_2.d" fld2 = "subFolder" mail = "mailID" ) @@ -725,7 +725,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesPath() { loc = strings.Join([]string{fld1, fld2, mail}, "/") short = "thisisahashofsomekind" es = NewExchangeRestore(Any()) - ent = details.DetailsEntry{ + ent = details.Entry{ RepoRef: repo.String(), ShortRef: short, ItemRef: mail, @@ -743,18 
+743,18 @@ func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesPath() { {"all folders", es.MailFolders(Any()), "", assert.True}, {"no folders", es.MailFolders(None()), "", assert.False}, {"matching folder", es.MailFolders([]string{fld1}), "", assert.True}, - {"matching folder id", es.MailFolders([]string{fID1}), "", assert.True}, + {"matching folder id", es.MailFolders([]string{fID1}), "", assert.False}, {"incomplete matching folder", es.MailFolders([]string{"mail"}), "", assert.False}, {"incomplete matching folder ID", es.MailFolders([]string{"mf_id"}), "", assert.False}, {"non-matching folder", es.MailFolders([]string{"smarf"}), "", assert.False}, {"non-matching folder substring", es.MailFolders([]string{fld1 + "_suffix"}), "", assert.False}, {"non-matching folder id substring", es.MailFolders([]string{fID1 + "_suffix"}), "", assert.False}, {"matching folder prefix", es.MailFolders([]string{fld1}, PrefixMatch()), "", assert.True}, - {"matching folder ID prefix", es.MailFolders([]string{fID1}, PrefixMatch()), "", assert.True}, + {"matching folder ID prefix", es.MailFolders([]string{fID1}, PrefixMatch()), "", assert.False}, {"incomplete folder prefix", es.MailFolders([]string{"mail"}, PrefixMatch()), "", assert.False}, {"matching folder substring", es.MailFolders([]string{"Folder"}), "", assert.False}, {"one of multiple folders", es.MailFolders([]string{"smarf", fld2}), "", assert.True}, - {"one of multiple folders by ID", es.MailFolders([]string{"smarf", fID2}), "", assert.True}, + {"one of multiple folders by ID", es.MailFolders([]string{"smarf", fID2}), "", assert.False}, {"all mail", es.Mails(Any(), Any()), "", assert.True}, {"no mail", es.Mails(Any(), None()), "", assert.False}, {"matching mail", es.Mails(Any(), []string{mail}), "", assert.True}, @@ -777,10 +777,6 @@ func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesPath() { aMatch = true break } - if matchesPathValues(scope, ExchangeMail, pvs) { - aMatch = true - break - } } test.expect(t, aMatch) }) @@ -789,16 +785,44 @@ func (suite *ExchangeSelectorSuite) TestExchangeScope_MatchesPath() { func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { var ( - contact = stubRepoRef(path.ExchangeService, path.ContactsCategory, "uid", "cfld", "cid") - event = stubRepoRef(path.ExchangeService, path.EventsCategory, "uid", "ecld", "eid") - mail = stubRepoRef(path.ExchangeService, path.EmailCategory, "uid", "mfld", "mid") - contactInSubFolder = stubRepoRef(path.ExchangeService, path.ContactsCategory, "uid", "cfld1/cfld2", "cid") + contact = stubPath( + suite.T(), + "uid", + []string{"cfld", "cid"}, + path.ContactsCategory) + event = stubPath( + suite.T(), + "uid", + []string{"efld", "eid"}, + path.EventsCategory) + mail = stubPath( + suite.T(), + "uid", + []string{"mfld", "mid"}, + path.EmailCategory) + contactInSubFolder = stubPath( + suite.T(), + "uid", + []string{"cfld1/cfld2", "cid"}, + path.ContactsCategory) ) - makeDeets := func(refs ...string) *details.Details { + toRR := func(p path.Path) string { + newElems := []string{} + + for _, e := range p.Folders() { + newElems = append(newElems, e+".d") + } + + joinedFldrs := strings.Join(newElems, "/") + + return stubRepoRef(p.Service(), p.Category(), p.ResourceOwner(), joinedFldrs, p.Item()) + } + + makeDeets := func(refs ...path.Path) *details.Details { deets := &details.Details{ DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{}, + Entries: []details.Entry{}, }, } @@ -814,8 +838,10 @@ func (suite *ExchangeSelectorSuite) 
TestExchangeRestore_Reduce() { itype = details.ExchangeMail } - deets.Entries = append(deets.Entries, details.DetailsEntry{ - RepoRef: r, + deets.Entries = append(deets.Entries, details.Entry{ + RepoRef: toRR(r), + // Don't escape because we assume nice paths. + LocationRef: r.Folder(false), ItemInfo: details.ItemInfo{ Exchange: &details.ExchangeInfo{ ItemType: itype, @@ -851,7 +877,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.AllData()) return er }, - []string{contact}, + []string{toRR(contact)}, }, { "event only", @@ -861,7 +887,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.AllData()) return er }, - []string{event}, + []string{toRR(event)}, }, { "mail only", @@ -871,7 +897,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.AllData()) return er }, - []string{mail}, + []string{toRR(mail)}, }, { "all", @@ -881,7 +907,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.AllData()) return er }, - []string{contact, event, mail}, + []string{toRR(contact), toRR(event), toRR(mail)}, }, { "only match contact", @@ -891,7 +917,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.Contacts([]string{"cfld"}, []string{"cid"})) return er }, - []string{contact}, + []string{toRR(contact)}, }, { "only match contactInSubFolder", @@ -901,7 +927,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.ContactFolders([]string{"cfld1/cfld2"})) return er }, - []string{contactInSubFolder}, + []string{toRR(contactInSubFolder)}, }, { "only match contactInSubFolder by prefix", @@ -911,7 +937,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.ContactFolders([]string{"cfld1/cfld2"}, PrefixMatch())) return er }, - []string{contactInSubFolder}, + []string{toRR(contactInSubFolder)}, }, { "only match contactInSubFolder by leaf folder", @@ -921,17 +947,17 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.ContactFolders([]string{"cfld2"})) return er }, - []string{contactInSubFolder}, + []string{toRR(contactInSubFolder)}, }, { "only match event", makeDeets(contact, event, mail), func() *ExchangeRestore { er := NewExchangeRestore([]string{"uid"}) - er.Include(er.Events([]string{"ecld"}, []string{"eid"})) + er.Include(er.Events([]string{"efld"}, []string{"eid"})) return er }, - []string{event}, + []string{toRR(event)}, }, { "only match mail", @@ -941,7 +967,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Include(er.Mails([]string{"mfld"}, []string{"mid"})) return er }, - []string{mail}, + []string{toRR(mail)}, }, { "exclude contact", @@ -952,7 +978,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Exclude(er.Contacts([]string{"cfld"}, []string{"cid"})) return er }, - []string{event, mail}, + []string{toRR(event), toRR(mail)}, }, { "exclude event", @@ -960,10 +986,10 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { func() *ExchangeRestore { er := NewExchangeRestore(Any()) er.Include(er.AllData()) - er.Exclude(er.Events([]string{"ecld"}, []string{"eid"})) + er.Exclude(er.Events([]string{"efld"}, []string{"eid"})) return er }, - []string{contact, mail}, + []string{toRR(contact), toRR(mail)}, }, { "exclude mail", @@ -974,7 +1000,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Exclude(er.Mails([]string{"mfld"}, []string{"mid"})) return er }, - 
[]string{contact, event}, + []string{toRR(contact), toRR(event)}, }, { "filter on mail subject", @@ -991,7 +1017,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Filter(er.MailSubject("subj")) return er }, - []string{mail}, + []string{toRR(mail)}, }, { "filter on mail subject multiple input categories", @@ -1012,7 +1038,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { er.Filter(er.MailSubject("subj")) return er }, - []string{mail}, + []string{toRR(mail)}, }, } for _, test := range table { @@ -1043,7 +1069,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce_locationRef() { makeDeets := func(refs ...string) *details.Details { deets := &details.Details{ DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{}, + Entries: []details.Entry{}, }, } @@ -1065,7 +1091,7 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce_locationRef() { location = mailLocation } - deets.Entries = append(deets.Entries, details.DetailsEntry{ + deets.Entries = append(deets.Entries, details.Entry{ RepoRef: r, LocationRef: location, ItemInfo: details.ItemInfo{ @@ -1319,7 +1345,7 @@ func (suite *ExchangeSelectorSuite) TestPasses() { ) short := "thisisahashofsomekind" - entry := details.DetailsEntry{ + entry := details.Entry{ ShortRef: short, ItemRef: mid, } @@ -1331,7 +1357,7 @@ func (suite *ExchangeSelectorSuite) TestPasses() { noMail = setScopesToDefault(es.Mails(Any(), None())) allMail = setScopesToDefault(es.Mails(Any(), Any())) repo = stubPath(suite.T(), "user", []string{"folder", mid}, path.EmailCategory) - ent = details.DetailsEntry{ + ent = details.Entry{ RepoRef: repo.String(), } ) @@ -1466,43 +1492,74 @@ func (suite *ExchangeSelectorSuite) TestExchangeCategory_leafCat() { func (suite *ExchangeSelectorSuite) TestExchangeCategory_PathValues() { t := suite.T() - contactPath := stubPath(t, "user", []string{"cfolder", "contactitem"}, path.ContactsCategory) - contactMap := map[categorizer][]string{ - ExchangeContactFolder: {contactPath.Folder(false)}, - ExchangeContact: {contactPath.Item(), "short"}, - } - eventPath := stubPath(t, "user", []string{"ecalendar", "eventitem"}, path.EventsCategory) - eventMap := map[categorizer][]string{ - ExchangeEventCalendar: {eventPath.Folder(false)}, - ExchangeEvent: {eventPath.Item(), "short"}, - } - mailPath := stubPath(t, "user", []string{"mfolder", "mailitem"}, path.EmailCategory) - mailMap := map[categorizer][]string{ - ExchangeMailFolder: {mailPath.Folder(false)}, - ExchangeMail: {mailPath.Item(), "short"}, - } + var ( + contactPath = stubPath(t, "u", []string{"cfolder.d", "contactitem.d"}, path.ContactsCategory) + contactLoc = stubPath(t, "u", []string{"cfolder", "contactitem"}, path.ContactsCategory) + contactMap = map[categorizer][]string{ + ExchangeContactFolder: {contactLoc.Folder(false)}, + ExchangeContact: {contactPath.Item(), "contact-short"}, + } + contactOnlyNameMap = map[categorizer][]string{ + ExchangeContactFolder: {contactLoc.Folder(false)}, + ExchangeContact: {"contact-short"}, + } + eventPath = stubPath(t, "u", []string{"ecalendar.d", "eventitem.d"}, path.EventsCategory) + eventLoc = stubPath(t, "u", []string{"ecalendar", "eventitem"}, path.EventsCategory) + eventMap = map[categorizer][]string{ + ExchangeEventCalendar: {eventLoc.Folder(false)}, + ExchangeEvent: {eventPath.Item(), "event-short"}, + } + eventOnlyNameMap = map[categorizer][]string{ + ExchangeEventCalendar: {eventLoc.Folder(false)}, + ExchangeEvent: {"event-short"}, + } + mailPath = stubPath(t, "u", 
[]string{"mfolder.d", "mailitem.d"}, path.EmailCategory) + mailLoc = stubPath(t, "u", []string{"mfolder", "mailitem"}, path.EmailCategory) + mailMap = map[categorizer][]string{ + ExchangeMailFolder: {mailLoc.Folder(false)}, + ExchangeMail: {mailPath.Item(), "mail-short"}, + } + mailOnlyNameMap = map[categorizer][]string{ + ExchangeMailFolder: {mailLoc.Folder(false)}, + ExchangeMail: {"mail-short"}, + } + ) table := []struct { - cat exchangeCategory - path path.Path - expect map[categorizer][]string + cat exchangeCategory + path path.Path + loc path.Path + short string + expect map[categorizer][]string + expectOnlyName map[categorizer][]string }{ - {ExchangeContact, contactPath, contactMap}, - {ExchangeEvent, eventPath, eventMap}, - {ExchangeMail, mailPath, mailMap}, + {ExchangeContact, contactPath, contactLoc, "contact-short", contactMap, contactOnlyNameMap}, + {ExchangeEvent, eventPath, eventLoc, "event-short", eventMap, eventOnlyNameMap}, + {ExchangeMail, mailPath, mailLoc, "mail-short", mailMap, mailOnlyNameMap}, } for _, test := range table { suite.Run(string(test.cat), func() { t := suite.T() - ent := details.DetailsEntry{ - RepoRef: test.path.String(), - ShortRef: "short", - ItemRef: test.path.Item(), + ent := details.Entry{ + RepoRef: test.path.String(), + ShortRef: test.short, + LocationRef: test.loc.Folder(true), + ItemRef: test.path.Item(), } pvs, err := test.cat.pathValues(test.path, ent, Config{}) require.NoError(t, err) - assert.Equal(t, test.expect, pvs) + + for k := range test.expect { + assert.ElementsMatch(t, test.expect[k], pvs[k]) + } + + pvs, err = test.cat.pathValues(test.path, ent, Config{OnlyMatchItemNames: true}) + require.NoError(t, err) + + for k := range test.expectOnlyName { + assert.ElementsMatch(t, test.expectOnlyName[k], pvs[k], k) + } }) } } diff --git a/src/pkg/selectors/helpers_test.go b/src/pkg/selectors/helpers_test.go index e85aa8d86..82e68791e 100644 --- a/src/pkg/selectors/helpers_test.go +++ b/src/pkg/selectors/helpers_test.go @@ -59,7 +59,7 @@ func (mc mockCategorizer) isLeaf() bool { func (mc mockCategorizer) pathValues( repo path.Path, - ent details.DetailsEntry, + ent details.Entry, cfg Config, ) (map[categorizer][]string, error) { return map[categorizer][]string{ @@ -146,6 +146,14 @@ func stubInfoScope(match string) mockScope { return sc } +func makeStubScope(cfg Config, match []string) mockScope { + return makeScope[mockScope](leafCatStub, match, defaultItemOptions(cfg)...) 
+} + +func (s mockScope) Matches(cat mockCategorizer, target string) bool { + return matches(s, cat, target) +} + // --------------------------------------------------------------------------- // Stringers and Concealers // --------------------------------------------------------------------------- diff --git a/src/pkg/selectors/onedrive.go b/src/pkg/selectors/onedrive.go index bd1837feb..18fa0fca3 100644 --- a/src/pkg/selectors/onedrive.go +++ b/src/pkg/selectors/onedrive.go @@ -6,7 +6,7 @@ import ( "github.com/alcionai/clues" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" @@ -223,8 +223,7 @@ func (s *oneDrive) Folders(folders []string, opts ...option) []OneDriveScope { scopes = append( scopes, - makeScope[OneDriveScope](OneDriveFolder, folders, os...), - ) + makeScope[OneDriveScope](OneDriveFolder, folders, os...)) return scopes } @@ -239,9 +238,8 @@ func (s *oneDrive) Items(folders, items []string, opts ...option) []OneDriveScop scopes = append( scopes, - makeScope[OneDriveScope](OneDriveItem, items). - set(OneDriveFolder, folders, opts...), - ) + makeScope[OneDriveScope](OneDriveItem, items, defaultItemOptions(s.Cfg)...). + set(OneDriveFolder, folders, opts...)) return scopes } @@ -391,7 +389,7 @@ func (c oneDriveCategory) isLeaf() bool { // => {odFolder: folder, odFileID: fileID} func (c oneDriveCategory) pathValues( repo path.Path, - ent details.DetailsEntry, + ent details.Entry, cfg Config, ) (map[categorizer][]string, error) { if ent.OneDrive == nil { @@ -399,7 +397,7 @@ func (c oneDriveCategory) pathValues( } // Ignore `drives//root:` for folder comparison - rFld := path.Builder{}.Append(repo.Folders()...).PopFront().PopFront().PopFront().String() + rFld := ent.OneDrive.ParentPath item := ent.ItemRef if len(item) == 0 { @@ -543,9 +541,9 @@ func (s OneDriveScope) matchesInfo(dii details.ItemInfo) bool { switch infoCat { case FileInfoCreatedAfter, FileInfoCreatedBefore: - i = common.FormatTime(info.Created) + i = dttm.Format(info.Created) case FileInfoModifiedAfter, FileInfoModifiedBefore: - i = common.FormatTime(info.Modified) + i = dttm.Format(info.Modified) } return s.Matches(infoCat, i) diff --git a/src/pkg/selectors/onedrive_test.go b/src/pkg/selectors/onedrive_test.go index 3bf953bf9..41835875b 100644 --- a/src/pkg/selectors/onedrive_test.go +++ b/src/pkg/selectors/onedrive_test.go @@ -9,7 +9,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" @@ -163,21 +164,40 @@ func (suite *OneDriveSelectorSuite) TestToOneDriveRestore() { func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { var ( - file = stubRepoRef(path.OneDriveService, path.FilesCategory, "uid", "drive/driveID/root:/folderA/folderB", "file") - file2 = stubRepoRef(path.OneDriveService, path.FilesCategory, "uid", "drive/driveID/root:/folderA/folderC", "file2") - file3 = stubRepoRef(path.OneDriveService, path.FilesCategory, "uid", "drive/driveID/root:/folderD/folderE", "file3") + file = stubRepoRef( + path.OneDriveService, + path.FilesCategory, + "uid", + 
"drive/driveID/root:/folderA.d/folderB.d", + "file") + fileParent = "folderA/folderB" + file2 = stubRepoRef( + path.OneDriveService, + path.FilesCategory, + "uid", + "drive/driveID/root:/folderA.d/folderC.d", + "file2") + fileParent2 = "folderA/folderC" + file3 = stubRepoRef( + path.OneDriveService, + path.FilesCategory, + "uid", + "drive/driveID/root:/folderD.d/folderE.d", + "file3") + fileParent3 = "folderD/folderE" ) deets := &details.Details{ DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ { RepoRef: file, ItemRef: "file", ItemInfo: details.ItemInfo{ OneDrive: &details.OneDriveInfo{ - ItemType: details.OneDriveItem, - ItemName: "fileName", + ItemType: details.OneDriveItem, + ItemName: "fileName", + ParentPath: fileParent, }, }, }, @@ -186,8 +206,9 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { ItemRef: "file2", ItemInfo: details.ItemInfo{ OneDrive: &details.OneDriveInfo{ - ItemType: details.OneDriveItem, - ItemName: "fileName2", + ItemType: details.OneDriveItem, + ItemName: "fileName2", + ParentPath: fileParent2, }, }, }, @@ -196,8 +217,9 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { // item ref intentionally blank to assert fallback case ItemInfo: details.ItemInfo{ OneDrive: &details.OneDriveInfo{ - ItemType: details.OneDriveItem, - ItemName: "fileName3", + ItemType: details.OneDriveItem, + ItemName: "fileName3", + ParentPath: fileParent3, }, }, }, @@ -211,14 +233,12 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { table := []struct { name string - deets *details.Details makeSelector func() *OneDriveRestore expect []string cfg Config }{ { - name: "all", - deets: deets, + name: "all", makeSelector: func() *OneDriveRestore { odr := NewOneDriveRestore(Any()) odr.Include(odr.AllData()) @@ -227,8 +247,7 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { expect: arr(file, file2, file3), }, { - name: "only match file", - deets: deets, + name: "only match file", makeSelector: func() *OneDriveRestore { odr := NewOneDriveRestore(Any()) odr.Include(odr.Items(Any(), []string{"file2"})) @@ -237,8 +256,7 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { expect: arr(file2), }, { - name: "id doesn't match name", - deets: deets, + name: "id doesn't match name", makeSelector: func() *OneDriveRestore { odr := NewOneDriveRestore(Any()) odr.Include(odr.Items(Any(), []string{"file2"})) @@ -248,8 +266,7 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { cfg: Config{OnlyMatchItemNames: true}, }, { - name: "only match file name", - deets: deets, + name: "only match file name", makeSelector: func() *OneDriveRestore { odr := NewOneDriveRestore(Any()) odr.Include(odr.Items(Any(), []string{"fileName2"})) @@ -259,8 +276,7 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { cfg: Config{OnlyMatchItemNames: true}, }, { - name: "name doesn't match id", - deets: deets, + name: "name doesn't match id", makeSelector: func() *OneDriveRestore { odr := NewOneDriveRestore(Any()) odr.Include(odr.Items(Any(), []string{"fileName2"})) @@ -269,8 +285,7 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { expect: []string{}, }, { - name: "only match folder", - deets: deets, + name: "only match folder", makeSelector: func() *OneDriveRestore { odr := NewOneDriveRestore([]string{"uid"}) odr.Include(odr.Folders([]string{"folderA/folderB", "folderA/folderC"})) @@ -288,7 +303,7 @@ func (suite *OneDriveSelectorSuite) 
TestOneDriveRestore_Reduce() { sel := test.makeSelector() sel.Configure(test.cfg) - results := sel.Reduce(ctx, test.deets, fault.New(true)) + results := sel.Reduce(ctx, deets, fault.New(true)) paths := results.Paths() assert.Equal(t, test.expect, paths) }) @@ -301,11 +316,13 @@ func (suite *OneDriveSelectorSuite) TestOneDriveCategory_PathValues() { fileName := "file" fileID := fileName + "-id" shortRef := "short" - elems := []string{"drive", "driveID", "root:", "dir1", "dir2", fileID} + elems := []string{odConsts.DrivesPathDir, "driveID", odConsts.RootPathDir, "dir1.d", "dir2.d", fileID} filePath, err := path.Build("tenant", "user", path.OneDriveService, path.FilesCategory, true, elems...) require.NoError(t, err, clues.ToCore(err)) + fileLoc := path.Builder{}.Append("dir1", "dir2") + table := []struct { name string pathElems []string @@ -345,13 +362,14 @@ func (suite *OneDriveSelectorSuite) TestOneDriveCategory_PathValues() { test.pathElems...) require.NoError(t, err, clues.ToCore(err)) - ent := details.DetailsEntry{ + ent := details.Entry{ RepoRef: filePath.String(), ShortRef: shortRef, ItemRef: fileID, ItemInfo: details.ItemInfo{ OneDrive: &details.OneDriveInfo{ - ItemName: fileName, + ItemName: fileName, + ParentPath: fileLoc.String(), }, }, } @@ -389,18 +407,18 @@ func (suite *OneDriveSelectorSuite) TestOneDriveScope_MatchesInfo() { scope []OneDriveScope expect assert.BoolAssertionFunc }{ - {"file create after the epoch", ods.CreatedAfter(common.FormatTime(epoch)), assert.True}, - {"file create after now", ods.CreatedAfter(common.FormatTime(now)), assert.False}, - {"file create after later", ods.CreatedAfter(common.FormatTime(future)), assert.False}, - {"file create before future", ods.CreatedBefore(common.FormatTime(future)), assert.True}, - {"file create before now", ods.CreatedBefore(common.FormatTime(now)), assert.False}, - {"file create before epoch", ods.CreatedBefore(common.FormatTime(now)), assert.False}, - {"file modified after the epoch", ods.ModifiedAfter(common.FormatTime(epoch)), assert.True}, - {"file modified after now", ods.ModifiedAfter(common.FormatTime(now)), assert.False}, - {"file modified after later", ods.ModifiedAfter(common.FormatTime(future)), assert.False}, - {"file modified before future", ods.ModifiedBefore(common.FormatTime(future)), assert.True}, - {"file modified before now", ods.ModifiedBefore(common.FormatTime(now)), assert.False}, - {"file modified before epoch", ods.ModifiedBefore(common.FormatTime(now)), assert.False}, + {"file create after the epoch", ods.CreatedAfter(dttm.Format(epoch)), assert.True}, + {"file create after now", ods.CreatedAfter(dttm.Format(now)), assert.False}, + {"file create after later", ods.CreatedAfter(dttm.Format(future)), assert.False}, + {"file create before future", ods.CreatedBefore(dttm.Format(future)), assert.True}, + {"file create before now", ods.CreatedBefore(dttm.Format(now)), assert.False}, + {"file create before epoch", ods.CreatedBefore(dttm.Format(now)), assert.False}, + {"file modified after the epoch", ods.ModifiedAfter(dttm.Format(epoch)), assert.True}, + {"file modified after now", ods.ModifiedAfter(dttm.Format(now)), assert.False}, + {"file modified after later", ods.ModifiedAfter(dttm.Format(future)), assert.False}, + {"file modified before future", ods.ModifiedBefore(dttm.Format(future)), assert.True}, + {"file modified before now", ods.ModifiedBefore(dttm.Format(now)), assert.False}, + {"file modified before epoch", ods.ModifiedBefore(dttm.Format(now)), assert.False}, } for _, test := range table { 
suite.Run(test.name, func() { diff --git a/src/pkg/selectors/scopes.go b/src/pkg/selectors/scopes.go index 51519546f..f0c5fb4da 100644 --- a/src/pkg/selectors/scopes.go +++ b/src/pkg/selectors/scopes.go @@ -89,7 +89,7 @@ type ( // folderCat: folder, // itemCat: itemID, // } - pathValues(path.Path, details.DetailsEntry, Config) (map[categorizer][]string, error) + pathValues(path.Path, details.Entry, Config) (map[categorizer][]string, error) // pathKeys produces a list of categorizers that can be used as keys in the pathValues // map. The combination of the two funcs generically interprets the context of the @@ -360,7 +360,7 @@ func reduce[T scopeT, C categoryT]( filts := scopesByCategory[T](s.Filters, dataCategories, true) incls := scopesByCategory[T](s.Includes, dataCategories, false) - ents := []details.DetailsEntry{} + ents := []details.Entry{} // for each entry, compare that entry against the scopes of the same data type for _, ent := range deets.Items() { @@ -441,7 +441,7 @@ func scopesByCategory[T scopeT, C categoryT]( func passes[T scopeT, C categoryT]( cat C, pathValues map[categorizer][]string, - entry details.DetailsEntry, + entry details.Entry, excs, filts, incs []T, ) bool { // a passing match requires either a filter or an inclusion @@ -490,7 +490,7 @@ func matchesEntry[T scopeT, C categoryT]( sc T, cat C, pathValues map[categorizer][]string, - entry details.DetailsEntry, + entry details.Entry, ) bool { // InfoCategory requires matching against service-specific info values if len(getInfoCategory(sc)) > 0 { diff --git a/src/pkg/selectors/scopes_test.go b/src/pkg/selectors/scopes_test.go index ed4020f42..92af1a572 100644 --- a/src/pkg/selectors/scopes_test.go +++ b/src/pkg/selectors/scopes_test.go @@ -257,7 +257,7 @@ func (suite *SelectorScopesSuite) TestReduce() { deets := func() details.Details { return details.Details{ DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ { RepoRef: stubRepoRef( pathServiceStub, @@ -302,7 +302,7 @@ func (suite *SelectorScopesSuite) TestReduce_locationRef() { deets := func() details.Details { return details.Details{ DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ { RepoRef: stubRepoRef( pathServiceStub, @@ -361,7 +361,7 @@ func (suite *SelectorScopesSuite) TestPasses() { var ( cat = rootCatStub pth = stubPath(suite.T(), "uid", []string{"fld"}, path.EventsCategory) - entry = details.DetailsEntry{ + entry = details.Entry{ RepoRef: pth.String(), } ) @@ -460,6 +460,67 @@ func (suite *SelectorScopesSuite) TestMatchesPathValues() { } } +func (suite *SelectorScopesSuite) TestDefaultItemOptions() { + table := []struct { + name string + cfg Config + match []string + target string + expect assert.BoolAssertionFunc + }{ + { + name: "no config, matches same value", + cfg: Config{}, + match: []string{"foo"}, + target: "foo", + expect: assert.True, + }, + { + name: "no config, does not match different case", + cfg: Config{}, + match: []string{"bar"}, + target: "BAR", + expect: assert.False, + }, + { + name: "no config, does not match substring", + cfg: Config{}, + match: []string{"bar"}, + target: "ba", + expect: assert.False, + }, + { + name: "only names, matches same same value", + cfg: Config{OnlyMatchItemNames: true}, + match: []string{"fnords"}, + target: "fnords", + expect: assert.True, + }, + { + name: "only names, matches different case", + cfg: Config{OnlyMatchItemNames: true}, + match: []string{"smarf"}, + target: "SMARF", + expect: assert.True, + }, + { 
+ name: "only names, does not match substring", + cfg: Config{OnlyMatchItemNames: true}, + match: []string{"brunhilda"}, + target: "unhild", + expect: assert.False, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + sc := makeStubScope(test.cfg, test.match) + + test.expect(t, sc.Matches(leafCatStub, test.target)) + }) + } +} + func (suite *SelectorScopesSuite) TestClean() { table := []struct { name string @@ -513,17 +574,17 @@ func (suite *SelectorScopesSuite) TestScopeConfig() { table := []struct { name string config scopeConfig - expect int + expect string }{ { name: "no configs set", config: scopeConfig{}, - expect: int(filters.EqualTo), + expect: filters.EqualTo, }, { name: "force prefix", config: scopeConfig{usePrefixFilter: true}, - expect: int(filters.TargetPrefixes), + expect: filters.TargetPrefixes, }, } for _, test := range table { @@ -531,7 +592,7 @@ func (suite *SelectorScopesSuite) TestScopeConfig() { t := suite.T() result := filterFor(test.config, input) - assert.Equal(t, test.expect, int(result.Comparator)) + assert.Equal(t, test.expect, string(result.Comparator)) }) } } diff --git a/src/pkg/selectors/selectors.go b/src/pkg/selectors/selectors.go index 02dd4427f..86a1cb56c 100644 --- a/src/pkg/selectors/selectors.go +++ b/src/pkg/selectors/selectors.go @@ -8,6 +8,7 @@ import ( "github.com/alcionai/clues" "golang.org/x/exp/maps" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" @@ -89,6 +90,8 @@ type pathCategorier interface { // Selector // --------------------------------------------------------------------------- +var _ idname.Provider = &Selector{} + // The core selector. Has no api for setting or retrieving data. // Is only used to pass along more specific selector instances. type Selector struct { @@ -460,11 +463,26 @@ func pathCategoriesIn[T scopeT, C categoryT](ss []scope) []path.CategoryType { // scope constructors // --------------------------------------------------------------------------- +// constructs the default item-scope comparator options according +// to the selector configuration. +// - if cfg.OnlyMatchItemNames == false, then comparison assumes item IDs, +// which are case sensitive, resulting in StrictEqualsMatch +func defaultItemOptions(cfg Config) []option { + opts := []option{} + + if !cfg.OnlyMatchItemNames { + opts = append(opts, StrictEqualMatch()) + } + + return opts +} + type scopeConfig struct { - usePathFilter bool - usePrefixFilter bool - useSuffixFilter bool - useEqualsFilter bool + usePathFilter bool + usePrefixFilter bool + useSuffixFilter bool + useEqualsFilter bool + useStrictEqualsFilter bool } type option func(*scopeConfig) @@ -493,9 +511,16 @@ func SuffixMatch() option { } } +// StrictEqualsMatch ensures the selector uses a StrictEquals comparator, instead +// of contains. Will not override a default Any() or None() comparator. +func StrictEqualMatch() option { + return func(sc *scopeConfig) { + sc.useStrictEqualsFilter = true + } +} + // ExactMatch ensures the selector uses an Equals comparator, instead -// of contains. Will not override a default Any() or None() -// comparator. +// of contains. Will not override a default Any() or None() comparator. 
func ExactMatch() option { return func(sc *scopeConfig) { sc.useEqualsFilter = true @@ -596,6 +621,10 @@ func filterize( return filters.Suffix(targets) } + if sc.useStrictEqualsFilter { + return filters.StrictEqual(targets) + } + if defaultFilter != nil { return defaultFilter(targets) } diff --git a/src/pkg/selectors/selectors_reduce_test.go b/src/pkg/selectors/selectors_reduce_test.go index 6229dc164..c57cde409 100644 --- a/src/pkg/selectors/selectors_reduce_test.go +++ b/src/pkg/selectors/selectors_reduce_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details/testdata" @@ -31,7 +31,7 @@ func (suite *SelectorReduceSuite) TestReduce() { table := []struct { name string selFunc func() selectors.Reducer - expected []details.DetailsEntry + expected []details.Entry }{ { name: "ExchangeAllMail", @@ -48,7 +48,7 @@ func (suite *SelectorReduceSuite) TestReduce() { selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.MailFolders( - []string{testdata.ExchangeEmailInboxPath.Folder(false)}, + []string{testdata.ExchangeEmailInboxPath.FolderLocation()}, )) return sel @@ -63,7 +63,7 @@ func (suite *SelectorReduceSuite) TestReduce() { return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, { name: "ExchangeMailSubjectExcludeItem", @@ -72,12 +72,12 @@ func (suite *SelectorReduceSuite) TestReduce() { sel.Filter(sel.MailSender("a-person")) sel.Exclude(sel.Mails( selectors.Any(), - []string{testdata.ExchangeEmailItemPath2.ShortRef()}, + []string{testdata.ExchangeEmailItemPath2.RR.ShortRef()}, )) return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, { name: "ExchangeMailSender", @@ -87,7 +87,7 @@ func (suite *SelectorReduceSuite) TestReduce() { return sel }, - expected: []details.DetailsEntry{ + expected: []details.Entry{ testdata.ExchangeEmailItems[0], testdata.ExchangeEmailItems[1], }, @@ -97,12 +97,12 @@ func (suite *SelectorReduceSuite) TestReduce() { selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Filter(sel.MailReceivedBefore( - common.FormatTime(testdata.Time1.Add(time.Second)), + dttm.Format(testdata.Time1.Add(time.Second)), )) return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, { name: "ExchangeMailID", @@ -110,12 +110,12 @@ func (suite *SelectorReduceSuite) TestReduce() { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.Mails( selectors.Any(), - []string{testdata.ExchangeEmailItemPath1.Item()}, + []string{testdata.ExchangeEmailItemPath1.ItemLocation()}, )) return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, { name: "ExchangeMailShortRef", @@ -123,12 +123,12 @@ func (suite *SelectorReduceSuite) TestReduce() { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.Mails( selectors.Any(), - []string{testdata.ExchangeEmailItemPath1.ShortRef()}, + []string{testdata.ExchangeEmailItemPath1.RR.ShortRef()}, )) return 
sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, { name: "ExchangeAllEventsAndMailWithSubject", @@ -142,7 +142,7 @@ func (suite *SelectorReduceSuite) TestReduce() { return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, { name: "ExchangeEventsAndMailWithSubject", @@ -153,7 +153,7 @@ func (suite *SelectorReduceSuite) TestReduce() { return sel }, - expected: []details.DetailsEntry{}, + expected: []details.Entry{}, }, { name: "ExchangeAll", @@ -166,7 +166,7 @@ func (suite *SelectorReduceSuite) TestReduce() { expected: append( append( append( - []details.DetailsEntry{}, + []details.Entry{}, testdata.ExchangeEmailItems...), testdata.ExchangeContactsItems...), testdata.ExchangeEventsItems..., @@ -177,12 +177,12 @@ func (suite *SelectorReduceSuite) TestReduce() { selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.MailFolders( - []string{testdata.ExchangeEmailBasePath.Folder(false)}, + []string{testdata.ExchangeEmailBasePath.FolderLocation()}, )) return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, // TODO (keepers): all folders are treated as prefix-matches at this time. // so this test actually does nothing different. In the future, we'll @@ -192,20 +192,20 @@ func (suite *SelectorReduceSuite) TestReduce() { selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.MailFolders( - []string{testdata.ExchangeEmailBasePath.Folder(false)}, + []string{testdata.ExchangeEmailBasePath.FolderLocation()}, selectors.PrefixMatch(), // force prefix matching )) return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEmailItems[0]}, + expected: []details.Entry{testdata.ExchangeEmailItems[0]}, }, { name: "ExchangeMailByFolderRoot", selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.MailFolders( - []string{testdata.ExchangeEmailInboxPath.Folder(false)}, + []string{testdata.ExchangeEmailInboxPath.FolderLocation()}, )) return sel @@ -217,19 +217,19 @@ func (suite *SelectorReduceSuite) TestReduce() { selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.ContactFolders( - []string{testdata.ExchangeContactsBasePath.Folder(false)}, + []string{testdata.ExchangeContactsBasePath.FolderLocation()}, )) return sel }, - expected: []details.DetailsEntry{testdata.ExchangeContactsItems[0]}, + expected: []details.Entry{testdata.ExchangeContactsItems[0]}, }, { name: "ExchangeContactByFolderRoot", selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.ContactFolders( - []string{testdata.ExchangeContactsRootPath.Folder(false)}, + []string{testdata.ExchangeContactsRootPath.FolderLocation()}, )) return sel @@ -242,24 +242,12 @@ func (suite *SelectorReduceSuite) TestReduce() { selFunc: func() selectors.Reducer { sel := selectors.NewExchangeRestore(selectors.Any()) sel.Include(sel.EventCalendars( - []string{testdata.ExchangeEventsBasePath.Folder(false)}, + []string{testdata.ExchangeEventsBasePath.FolderLocation()}, )) return sel }, - expected: []details.DetailsEntry{testdata.ExchangeEventsItems[0]}, - }, - { - name: "ExchangeEventsByFolderRoot", - selFunc: func() selectors.Reducer { - sel := 
selectors.NewExchangeRestore(selectors.Any()) - sel.Include(sel.EventCalendars( - []string{testdata.ExchangeEventsRootPath.Folder(false)}, - )) - - return sel - }, - expected: testdata.ExchangeEventsItems, + expected: []details.Entry{testdata.ExchangeEventsItems[0]}, }, } diff --git a/src/pkg/selectors/sharepoint.go b/src/pkg/selectors/sharepoint.go index defa6d206..a408f6339 100644 --- a/src/pkg/selectors/sharepoint.go +++ b/src/pkg/selectors/sharepoint.go @@ -6,7 +6,7 @@ import ( "github.com/alcionai/clues" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" @@ -245,8 +245,7 @@ func (s *sharePoint) AllData() []SharePointScope { scopes, makeScope[SharePointScope](SharePointLibraryFolder, Any()), makeScope[SharePointScope](SharePointList, Any()), - makeScope[SharePointScope](SharePointPageFolder, Any()), - ) + makeScope[SharePointScope](SharePointPageFolder, Any())) return scopes } @@ -276,9 +275,8 @@ func (s *sharePoint) ListItems(lists, items []string, opts ...option) []SharePoi scopes = append( scopes, - makeScope[SharePointScope](SharePointListItem, items). - set(SharePointList, lists, opts...), - ) + makeScope[SharePointScope](SharePointListItem, items, defaultItemOptions(s.Cfg)...). + set(SharePointList, lists, opts...)) return scopes } @@ -312,8 +310,7 @@ func (s *sharePoint) LibraryFolders(libraryFolders []string, opts ...option) []S scopes = append( scopes, - makeScope[SharePointScope](SharePointLibraryFolder, libraryFolders, os...), - ) + makeScope[SharePointScope](SharePointLibraryFolder, libraryFolders, os...)) return scopes } @@ -328,9 +325,8 @@ func (s *sharePoint) LibraryItems(libraries, items []string, opts ...option) []S scopes = append( scopes, - makeScope[SharePointScope](SharePointLibraryItem, items). - set(SharePointLibraryFolder, libraries, opts...), - ) + makeScope[SharePointScope](SharePointLibraryItem, items, defaultItemOptions(s.Cfg)...). + set(SharePointLibraryFolder, libraries, opts...)) return scopes } @@ -361,8 +357,7 @@ func (s *sharePoint) PageItems(pages, items []string, opts ...option) []SharePoi scopes = append( scopes, makeScope[SharePointScope](SharePointPage, items). 
- set(SharePointPageFolder, pages, opts...), - ) + set(SharePointPageFolder, pages, opts...)) return scopes } @@ -516,13 +511,13 @@ func (c sharePointCategory) isLeaf() bool { // => {spFolder: folder, spItemID: itemID} func (c sharePointCategory) pathValues( repo path.Path, - ent details.DetailsEntry, + ent details.Entry, cfg Config, ) (map[categorizer][]string, error) { var ( - folderCat, itemCat categorizer - dropDriveFolderPrefix bool - itemID string + folderCat, itemCat categorizer + itemID string + rFld string ) switch c { @@ -531,25 +526,21 @@ func (c sharePointCategory) pathValues( return nil, clues.New("no SharePoint ItemInfo in details") } - dropDriveFolderPrefix = true folderCat, itemCat = SharePointLibraryFolder, SharePointLibraryItem + rFld = ent.SharePoint.ParentPath case SharePointList, SharePointListItem: folderCat, itemCat = SharePointList, SharePointListItem + rFld = ent.LocationRef case SharePointPage, SharePointPageFolder: folderCat, itemCat = SharePointPageFolder, SharePointPage + rFld = ent.LocationRef default: return nil, clues.New("unrecognized sharePointCategory").With("category", c) } - rFld := repo.Folder(false) - if dropDriveFolderPrefix { - // like onedrive, ignore `drives//root:` for library folder comparison - rFld = path.Builder{}.Append(repo.Folders()...).PopFront().PopFront().PopFront().String() - } - item := ent.ItemRef if len(item) == 0 { item = repo.Item() @@ -568,10 +559,6 @@ func (c sharePointCategory) pathValues( result[itemCat] = append(result[itemCat], itemID) } - if len(ent.LocationRef) > 0 { - result[folderCat] = append(result[folderCat], ent.LocationRef) - } - return result, nil } @@ -711,9 +698,9 @@ func (s SharePointScope) matchesInfo(dii details.ItemInfo) bool { case SharePointWebURL: i = info.WebURL case SharePointInfoCreatedAfter, SharePointInfoCreatedBefore: - i = common.FormatTime(info.Created) + i = dttm.Format(info.Created) case SharePointInfoModifiedAfter, SharePointInfoModifiedBefore: - i = common.FormatTime(info.Modified) + i = dttm.Format(info.Modified) case SharePointInfoLibraryDrive: ds := []string{} diff --git a/src/pkg/selectors/sharepoint_test.go b/src/pkg/selectors/sharepoint_test.go index e606ff5e2..2b8f3edf4 100644 --- a/src/pkg/selectors/sharepoint_test.go +++ b/src/pkg/selectors/sharepoint_test.go @@ -1,6 +1,7 @@ package selectors import ( + "strings" "testing" "time" @@ -8,8 +9,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "golang.org/x/exp/slices" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" + odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" @@ -204,67 +207,111 @@ func (suite *SharePointSelectorSuite) TestToSharePointRestore() { } func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { + toRR := func(cat path.CategoryType, siteID string, folders []string, item string) string { + folderElems := make([]string, 0, len(folders)) + + for _, f := range folders { + folderElems = append(folderElems, f+".d") + } + + return stubRepoRef( + path.SharePointService, + cat, + siteID, + strings.Join(folderElems, "/"), + item) + } + var ( - drivePfx = "drive/drive!id/root:/" - pairAC = "folderA/folderC" - pairGH = "folderG/folderH" - item = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", 
drivePfx+"folderA/folderB", "item") - item2 = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", drivePfx+pairAC, "item2") - item3 = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", drivePfx+"folderD/folderE", "item3") - item4 = stubRepoRef(path.SharePointService, path.PagesCategory, "sid", pairGH, "item4") - item5 = stubRepoRef(path.SharePointService, path.PagesCategory, "sid", pairGH, "item5") + prefixElems = []string{ + odConsts.DrivesPathDir, + "drive!id", + odConsts.RootPathDir, + } + itemElems1 = []string{"folderA", "folderB"} + itemElems2 = []string{"folderA", "folderC"} + itemElems3 = []string{"folderD", "folderE"} + pairAC = "folderA/folderC" + pairGH = "folderG/folderH" + item = toRR( + path.LibrariesCategory, + "sid", + append(slices.Clone(prefixElems), itemElems1...), + "item") + item2 = toRR( + path.LibrariesCategory, + "sid", + append(slices.Clone(prefixElems), itemElems2...), + "item2") + item3 = toRR( + path.LibrariesCategory, + "sid", + append(slices.Clone(prefixElems), itemElems3...), + "item3") + item4 = stubRepoRef(path.SharePointService, path.PagesCategory, "sid", pairGH, "item4") + item5 = stubRepoRef(path.SharePointService, path.PagesCategory, "sid", pairGH, "item5") ) deets := &details.Details{ DetailsModel: details.DetailsModel{ - Entries: []details.DetailsEntry{ + Entries: []details.Entry{ { - RepoRef: item, - ItemRef: "item", + RepoRef: item, + ItemRef: "item", + LocationRef: strings.Join(append([]string{odConsts.RootPathDir}, itemElems1...), "/"), ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ - ItemType: details.SharePointLibrary, - ItemName: "itemName", + ItemType: details.SharePointLibrary, + ItemName: "itemName", + ParentPath: strings.Join(itemElems1, "/"), }, }, }, { - RepoRef: item2, + RepoRef: item2, + LocationRef: strings.Join(append([]string{odConsts.RootPathDir}, itemElems2...), "/"), // ItemRef intentionally blank to test fallback case ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ - ItemType: details.SharePointLibrary, - ItemName: "itemName2", + ItemType: details.SharePointLibrary, + ItemName: "itemName2", + ParentPath: strings.Join(itemElems2, "/"), }, }, }, { - RepoRef: item3, - ItemRef: "item3", + RepoRef: item3, + ItemRef: "item3", + LocationRef: strings.Join(append([]string{odConsts.RootPathDir}, itemElems3...), "/"), ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ - ItemType: details.SharePointLibrary, - ItemName: "itemName3", + ItemType: details.SharePointLibrary, + ItemName: "itemName3", + ParentPath: strings.Join(itemElems3, "/"), }, }, }, { - RepoRef: item4, - ItemRef: "item4", + RepoRef: item4, + LocationRef: pairGH, + ItemRef: "item4", ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ - ItemType: details.SharePointPage, - ItemName: "itemName4", + ItemType: details.SharePointPage, + ItemName: "itemName4", + ParentPath: pairGH, }, }, }, { - RepoRef: item5, + RepoRef: item5, + LocationRef: pairGH, // ItemRef intentionally blank to test fallback case ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ - ItemType: details.SharePointPage, - ItemName: "itemName5", + ItemType: details.SharePointPage, + ItemName: "itemName5", + ParentPath: pairGH, }, }, }, @@ -278,14 +325,12 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { table := []struct { name string - deets *details.Details makeSelector func() *SharePointRestore expect []string cfg Config }{ { - name: "all", - deets: deets, + name: "all", makeSelector: 
func() *SharePointRestore { odr := NewSharePointRestore(Any()) odr.Include(odr.AllData()) @@ -294,8 +339,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { expect: arr(item, item2, item3, item4, item5), }, { - name: "only match item", - deets: deets, + name: "only match item", makeSelector: func() *SharePointRestore { odr := NewSharePointRestore(Any()) odr.Include(odr.LibraryItems(Any(), []string{"item2"})) @@ -304,8 +348,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { expect: arr(item2), }, { - name: "id doesn't match name", - deets: deets, + name: "id doesn't match name", makeSelector: func() *SharePointRestore { odr := NewSharePointRestore(Any()) odr.Include(odr.LibraryItems(Any(), []string{"item2"})) @@ -315,8 +358,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { cfg: Config{OnlyMatchItemNames: true}, }, { - name: "only match item name", - deets: deets, + name: "only match item name", makeSelector: func() *SharePointRestore { odr := NewSharePointRestore(Any()) odr.Include(odr.LibraryItems(Any(), []string{"itemName2"})) @@ -326,8 +368,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { cfg: Config{OnlyMatchItemNames: true}, }, { - name: "name doesn't match", - deets: deets, + name: "name doesn't match", makeSelector: func() *SharePointRestore { odr := NewSharePointRestore(Any()) odr.Include(odr.LibraryItems(Any(), []string{"itemName2"})) @@ -336,8 +377,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { expect: []string{}, }, { - name: "only match folder", - deets: deets, + name: "only match folder", makeSelector: func() *SharePointRestore { odr := NewSharePointRestore([]string{"sid"}) odr.Include(odr.LibraryFolders([]string{"folderA/folderB", pairAC})) @@ -346,8 +386,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { expect: arr(item, item2), }, { - name: "pages match folder", - deets: deets, + name: "pages match folder", makeSelector: func() *SharePointRestore { odr := NewSharePointRestore([]string{"sid"}) odr.Include(odr.Pages([]string{pairGH, pairAC})) @@ -365,7 +404,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { sel := test.makeSelector() sel.Configure(test.cfg) - results := sel.Reduce(ctx, test.deets, fault.New(true)) + results := sel.Reduce(ctx, deets, fault.New(true)) paths := results.Paths() assert.Equal(t, test.expect, paths) }) @@ -377,21 +416,32 @@ func (suite *SharePointSelectorSuite) TestSharePointCategory_PathValues() { itemName = "item" itemID = "item-id" shortRef = "short" - driveElems = []string{"drive", "drive!id", "root:", "dir1", "dir2", itemID} - elems = []string{"dir1", "dir2", itemID} + driveElems = []string{ + odConsts.DrivesPathDir, + "drive!id", + odConsts.RootPathDir + ".d", + "dir1.d", + "dir2.d", + itemID, + } + elems = []string{"dir1", "dir2", itemID} ) table := []struct { - name string - sc sharePointCategory - pathElems []string - expected map[categorizer][]string - cfg Config + name string + sc sharePointCategory + pathElems []string + locRef string + parentPath string + expected map[categorizer][]string + cfg Config }{ { - name: "SharePoint Libraries", - sc: SharePointLibraryItem, - pathElems: driveElems, + name: "SharePoint Libraries", + sc: SharePointLibraryItem, + pathElems: driveElems, + locRef: "root:/dir1/dir2", + parentPath: "dir1/dir2", expected: map[categorizer][]string{ SharePointLibraryFolder: {"dir1/dir2"}, SharePointLibraryItem: {itemID, shortRef}, @@ -399,9 
+449,11 @@ func (suite *SharePointSelectorSuite) TestSharePointCategory_PathValues() { cfg: Config{}, }, { - name: "SharePoint Libraries w/ name", - sc: SharePointLibraryItem, - pathElems: driveElems, + name: "SharePoint Libraries w/ name", + sc: SharePointLibraryItem, + pathElems: driveElems, + locRef: "root:/dir1/dir2", + parentPath: "dir1/dir2", expected: map[categorizer][]string{ SharePointLibraryFolder: {"dir1/dir2"}, SharePointLibraryItem: {itemName, shortRef}, @@ -412,6 +464,7 @@ func (suite *SharePointSelectorSuite) TestSharePointCategory_PathValues() { name: "SharePoint Lists", sc: SharePointListItem, pathElems: elems, + locRef: "dir1/dir2", expected: map[categorizer][]string{ SharePointList: {"dir1/dir2"}, SharePointListItem: {itemID, shortRef}, @@ -433,13 +486,15 @@ func (suite *SharePointSelectorSuite) TestSharePointCategory_PathValues() { test.pathElems...) require.NoError(t, err, clues.ToCore(err)) - ent := details.DetailsEntry{ - RepoRef: itemPath.String(), - ShortRef: shortRef, - ItemRef: itemPath.Item(), + ent := details.Entry{ + RepoRef: itemPath.String(), + ShortRef: shortRef, + ItemRef: itemPath.Item(), + LocationRef: test.locRef, ItemInfo: details.ItemInfo{ SharePoint: &details.SharePointInfo{ - ItemName: itemName, + ItemName: itemName, + ParentPath: test.parentPath, }, }, } @@ -477,19 +532,19 @@ func (suite *SharePointSelectorSuite) TestSharePointScope_MatchesInfo() { {"host does not contain substring", host, sel.WebURL([]string{"website"}), assert.False}, {"url does not suffix substring", url, sel.WebURL([]string{"oo"}, SuffixMatch()), assert.False}, {"host mismatch", host, sel.WebURL([]string{"www.google.com"}), assert.False}, - {"file create after the epoch", host, sel.CreatedAfter(common.FormatTime(epoch)), assert.True}, - {"file create after now", host, sel.CreatedAfter(common.FormatTime(now)), assert.False}, - {"file create after later", url, sel.CreatedAfter(common.FormatTime(future)), assert.False}, - {"file create before future", host, sel.CreatedBefore(common.FormatTime(future)), assert.True}, - {"file create before now", host, sel.CreatedBefore(common.FormatTime(now)), assert.False}, - {"file create before modification", host, sel.CreatedBefore(common.FormatTime(modification)), assert.True}, - {"file create before epoch", host, sel.CreatedBefore(common.FormatTime(now)), assert.False}, - {"file modified after the epoch", host, sel.ModifiedAfter(common.FormatTime(epoch)), assert.True}, - {"file modified after now", host, sel.ModifiedAfter(common.FormatTime(now)), assert.True}, - {"file modified after later", host, sel.ModifiedAfter(common.FormatTime(future)), assert.False}, - {"file modified before future", host, sel.ModifiedBefore(common.FormatTime(future)), assert.True}, - {"file modified before now", host, sel.ModifiedBefore(common.FormatTime(now)), assert.False}, - {"file modified before epoch", host, sel.ModifiedBefore(common.FormatTime(now)), assert.False}, + {"file create after the epoch", host, sel.CreatedAfter(dttm.Format(epoch)), assert.True}, + {"file create after now", host, sel.CreatedAfter(dttm.Format(now)), assert.False}, + {"file create after later", url, sel.CreatedAfter(dttm.Format(future)), assert.False}, + {"file create before future", host, sel.CreatedBefore(dttm.Format(future)), assert.True}, + {"file create before now", host, sel.CreatedBefore(dttm.Format(now)), assert.False}, + {"file create before modification", host, sel.CreatedBefore(dttm.Format(modification)), assert.True}, + {"file create before epoch", host, 
sel.CreatedBefore(dttm.Format(now)), assert.False}, + {"file modified after the epoch", host, sel.ModifiedAfter(dttm.Format(epoch)), assert.True}, + {"file modified after now", host, sel.ModifiedAfter(dttm.Format(now)), assert.True}, + {"file modified after later", host, sel.ModifiedAfter(dttm.Format(future)), assert.False}, + {"file modified before future", host, sel.ModifiedBefore(dttm.Format(future)), assert.True}, + {"file modified before now", host, sel.ModifiedBefore(dttm.Format(now)), assert.False}, + {"file modified before epoch", host, sel.ModifiedBefore(dttm.Format(now)), assert.False}, {"in library", host, sel.Library("included-library"), assert.True}, {"not in library", host, sel.Library("not-included-library"), assert.False}, {"library id", host, sel.Library("1234"), assert.True}, diff --git a/src/pkg/selectors/testdata/onedrive.go b/src/pkg/selectors/testdata/onedrive.go new file mode 100644 index 000000000..8592d3d80 --- /dev/null +++ b/src/pkg/selectors/testdata/onedrive.go @@ -0,0 +1,9 @@ +package testdata + +import "github.com/alcionai/corso/src/pkg/selectors" + +// OneDriveBackupFolderScope is the standard folder scope that should be used +// in integration backups with onedrive. +func OneDriveBackupFolderScope(sel *selectors.OneDriveBackup) []selectors.OneDriveScope { + return sel.Folders([]string{"test"}, selectors.PrefixMatch()) +} diff --git a/src/pkg/services/m365/api/api.go b/src/pkg/services/m365/api/api.go index 37b9b48a7..1500840fe 100644 --- a/src/pkg/services/m365/api/api.go +++ b/src/pkg/services/m365/api/api.go @@ -1,7 +1,10 @@ package api import ( + "context" + "github.com/alcionai/clues" + "github.com/microsoft/kiota-abstractions-go/serialization" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/pkg/account" @@ -19,37 +22,87 @@ import ( type Client struct { Credentials account.M365Config - // The stable service is re-usable for any non-paged request. + // The Stable service is re-usable for any non-paged request. // This allows us to maintain performance across async requests. - stable graph.Servicer + Stable graph.Servicer + + // The LargeItem graph servicer is configured specifically for + // downloading large items such as drive item content or outlook + // mail and event attachments. + LargeItem graph.Servicer } -// NewClient produces a new api client. Must be used in +// NewClient produces a new exchange api client. Must be used in // place of creating an ad-hoc client struct. func NewClient(creds account.M365Config) (Client, error) { - s, err := newService(creds) + s, err := NewService(creds) if err != nil { return Client{}, err } - return Client{creds, s}, nil -} - -// service generates a new service. Used for paged and other long-running -// requests instead of the client's stable service, so that in-flight state -// within the adapter doesn't get clobbered -func (c Client) Service() (*graph.Service, error) { - return newService(c.Credentials) -} - -func newService(creds account.M365Config) (*graph.Service, error) { - adapter, err := graph.CreateAdapter( - creds.AzureTenantID, - creds.AzureClientID, - creds.AzureClientSecret) + li, err := newLargeItemService(creds) if err != nil { - return nil, clues.Wrap(err, "generating graph api service client") + return Client{}, err } - return graph.NewService(adapter), nil + return Client{creds, s, li}, nil } + +// Service generates a new graph servicer. 
New servicers are used for paged +// and other long-running requests instead of the client's stable service, +// so that in-flight state within the adapter doesn't get clobbered. +// Most calls should use the Client.Stable property instead of calling this +// func, unless it is explicitly necessary. +func (c Client) Service() (graph.Servicer, error) { + return NewService(c.Credentials) +} + +func NewService(creds account.M365Config, opts ...graph.Option) (*graph.Service, error) { + a, err := graph.CreateAdapter( + creds.AzureTenantID, + creds.AzureClientID, + creds.AzureClientSecret, + opts...) + if err != nil { + return nil, clues.Wrap(err, "generating graph api adapter") + } + + return graph.NewService(a), nil +} + +func newLargeItemService(creds account.M365Config) (*graph.Service, error) { + a, err := NewService(creds, graph.NoTimeout()) + if err != nil { + return nil, clues.Wrap(err, "generating no-timeout graph adapter") + } + + return a, nil +} + +// --------------------------------------------------------------------------- +// common types and consts +// --------------------------------------------------------------------------- + +// DeltaUpdate holds the results of a current delta token. It normally +// gets produced when aggregating the addition and removal of items in +// a delta-queryable folder. +type DeltaUpdate struct { + // the deltaLink itself + URL string + // true if the old delta was marked as invalid + Reset bool +} + +// GraphQuery represents functions which perform exchange-specific queries +// into the M365 backstore. The returned items will only contain the information +// that is included in the request options. +// TODO: use selector or path for granularity into specific folders or specific date ranges +type GraphQuery func(ctx context.Context, userID string) (serialization.Parsable, error) + +// GraphRetrievalFunc is a function from the Microsoft Graph API that retrieves +// the default associated data of an M365 object. This varies by object. Additional +// queries must be run to obtain the omitted fields.
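+// For instance, an event-flavored GraphRetrievalFunc might wrap a call such as
+// client.Users().ByUserId(user).Events().ByEventId(m365ID).Get(ctx, nil) (illustrative only),
+// with follow-up requests needed for omitted fields such as attachments.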
+type GraphRetrievalFunc func( + ctx context.Context, + user, m365ID string, +) (serialization.Parsable, error) diff --git a/src/internal/connector/exchange/api/api_test.go b/src/pkg/services/m365/api/api_test.go similarity index 100% rename from src/internal/connector/exchange/api/api_test.go rename to src/pkg/services/m365/api/api_test.go diff --git a/src/internal/connector/exchange/api/contacts.go b/src/pkg/services/m365/api/contacts.go similarity index 60% rename from src/internal/connector/exchange/api/contacts.go rename to src/pkg/services/m365/api/contacts.go index 78d6d7366..2f5395b37 100644 --- a/src/internal/connector/exchange/api/contacts.go +++ b/src/pkg/services/m365/api/contacts.go @@ -3,7 +3,6 @@ package api import ( "context" "fmt" - "os" "github.com/alcionai/clues" "github.com/microsoft/kiota-abstractions-go/serialization" @@ -16,7 +15,6 @@ import ( "github.com/alcionai/corso/src/internal/connector/graph/api" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -47,7 +45,7 @@ func (c Contacts) CreateContactFolder( temp := folderName requestBody.SetDisplayName(&temp) - mdl, err := c.Stable.Client().UsersById(user).ContactFolders().Post(ctx, requestBody, nil) + mdl, err := c.Stable.Client().Users().ByUserId(user).ContactFolders().Post(ctx, requestBody, nil) if err != nil { return nil, graph.Wrap(ctx, err, "creating contact folder") } @@ -67,7 +65,7 @@ func (c Contacts) DeleteContainer( return graph.Stack(ctx, err) } - err = srv.Client().UsersById(user).ContactFoldersById(folderID).Delete(ctx, nil) + err = srv.Client().Users().ByUserId(user).ContactFolders().ByContactFolderId(folderID).Delete(ctx, nil) if err != nil { return graph.Stack(ctx, err) } @@ -86,7 +84,7 @@ func (c Contacts) GetItem( Headers: buildPreferHeaders(false, immutableIDs), } - cont, err := c.Stable.Client().UsersById(user).ContactsById(itemID).Get(ctx, options) + cont, err := c.Stable.Client().Users().ByUserId(user).Contacts().ByContactId(itemID).Get(ctx, options) if err != nil { return nil, nil, graph.Stack(ctx, err) } @@ -103,7 +101,7 @@ func (c Contacts) GetContainerByID( return nil, graph.Wrap(ctx, err, "setting contact folder options") } - resp, err := c.Stable.Client().UsersById(userID).ContactFoldersById(dirID).Get(ctx, ofcf) + resp, err := c.Stable.Client().Users().ByUserId(userID).ContactFolders().ByContactFolderId(dirID).Get(ctx, ofcf) if err != nil { return nil, graph.Stack(ctx, err) } @@ -122,7 +120,7 @@ func (c Contacts) EnumerateContainers( fn func(graph.CacheFolder) error, errs *fault.Bus, ) error { - service, err := c.service() + service, err := c.Service() if err != nil { return graph.Stack(ctx, err) } @@ -136,8 +134,10 @@ func (c Contacts) EnumerateContainers( el := errs.Local() builder := service.Client(). - UsersById(userID). - ContactFoldersById(baseDirID). + Users(). + ByUserId(userID). + ContactFolders(). + ByContactFolderId(baseDirID). 
ChildFolders() for { @@ -191,8 +191,37 @@ var _ itemPager = &contactPager{} type contactPager struct { gs graph.Servicer - builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder - options *users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration + builder *users.ItemContactFoldersItemContactsRequestBuilder + options *users.ItemContactFoldersItemContactsRequestBuilderGetRequestConfiguration +} + +func NewContactPager( + ctx context.Context, + gs graph.Servicer, + user, directoryID string, + immutableIDs bool, +) (itemPager, error) { + selecting, err := buildOptions([]string{"parentFolderId"}, fieldsForContacts) + if err != nil { + return nil, err + } + + requestParameters := &users.ItemContactFoldersItemContactsRequestBuilderGetQueryParameters{ + Select: selecting, + } + + options := &users.ItemContactFoldersItemContactsRequestBuilderGetRequestConfiguration{ + QueryParameters: requestParameters, + Headers: buildPreferHeaders(true, immutableIDs), + } + + if err != nil { + return &contactPager{}, err + } + + builder := gs.Client().Users().ByUserId(user).ContactFolders().ByContactFolderId(directoryID).Contacts() + + return &contactPager{gs, builder, options}, nil } func (p *contactPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { @@ -201,14 +230,97 @@ func (p *contactPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) return nil, graph.Stack(ctx, err) } - return resp, nil + return api.EmptyDeltaLinker[models.Contactable]{PageLinkValuer: resp}, nil } func (p *contactPager) setNext(nextLink string) { + p.builder = users.NewItemContactFoldersItemContactsRequestBuilder(nextLink, p.gs.Adapter()) +} + +// non delta pagers don't need reset +func (p *contactPager) reset(context.Context) {} + +func (p *contactPager) valuesIn(pl api.PageLinker) ([]getIDAndAddtler, error) { + return toValues[models.Contactable](pl) +} + +// --------------------------------------------------------------------------- +// delta item pager +// --------------------------------------------------------------------------- + +var _ itemPager = &contactDeltaPager{} + +type contactDeltaPager struct { + gs graph.Servicer + user string + directoryID string + builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder + options *users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration +} + +func getContactDeltaBuilder( + ctx context.Context, + gs graph.Servicer, + user string, + directoryID string, + options *users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration, +) *users.ItemContactFoldersItemContactsDeltaRequestBuilder { + builder := gs.Client().Users().ByUserId(user).ContactFolders().ByContactFolderId(directoryID).Contacts().Delta() + return builder +} + +func NewContactDeltaPager( + ctx context.Context, + gs graph.Servicer, + user, directoryID, deltaURL string, + immutableIDs bool, +) (itemPager, error) { + selecting, err := buildOptions([]string{"parentFolderId"}, fieldsForContacts) + if err != nil { + return nil, err + } + + requestParameters := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetQueryParameters{ + Select: selecting, + } + + options := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration{ + QueryParameters: requestParameters, + Headers: buildPreferHeaders(true, immutableIDs), + } + + if err != nil { + return &contactDeltaPager{}, err + } + + var builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder + if deltaURL != "" { + builder = 
users.NewItemContactFoldersItemContactsDeltaRequestBuilder(deltaURL, gs.Adapter()) + } else { + builder = getContactDeltaBuilder(ctx, gs, user, directoryID, options) + } + + return &contactDeltaPager{gs, user, directoryID, builder, options}, nil +} + +func (p *contactDeltaPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { + resp, err := p.builder.Get(ctx, p.options) + if err != nil { + return nil, graph.Stack(ctx, err) + } + + return resp, nil +} + +func (p *contactDeltaPager) setNext(nextLink string) { p.builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(nextLink, p.gs.Adapter()) } -func (p *contactPager) valuesIn(pl api.DeltaPageLinker) ([]getIDAndAddtler, error) { +func (p *contactDeltaPager) reset(ctx context.Context) { + p.builder = getContactDeltaBuilder(ctx, p.gs, p.user, p.directoryID, p.options) +} + +func (p *contactDeltaPager) valuesIn(pl api.PageLinker) ([]getIDAndAddtler, error) { return toValues[models.Contactable](pl) } @@ -216,69 +328,29 @@ func (c Contacts) GetAddedAndRemovedItemIDs( ctx context.Context, user, directoryID, oldDelta string, immutableIDs bool, + canMakeDeltaQueries bool, ) ([]string, []string, DeltaUpdate, error) { - service, err := c.service() + service, err := c.Service() if err != nil { return nil, nil, DeltaUpdate{}, graph.Stack(ctx, err) } - var resetDelta bool - ctx = clues.Add( ctx, "category", selectors.ExchangeContact, "container_id", directoryID) - options, err := optionsForContactFoldersItemDelta( - []string{"parentFolderId"}, - immutableIDs) + pager, err := NewContactPager(ctx, service, user, directoryID, immutableIDs) if err != nil { - return nil, - nil, - DeltaUpdate{}, - graph.Wrap(ctx, err, "setting contact folder options") + return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating non-delta pager") } - if len(oldDelta) > 0 { - var ( - builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(oldDelta, service.Adapter()) - pgr = &contactPager{service, builder, options} - ) - - added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) - // note: happy path, not the error condition - if err == nil { - return added, removed, DeltaUpdate{deltaURL, false}, err - } - - // only return on error if it is NOT a delta issue. - // on bad deltas we retry the call with the regular builder - if !graph.IsErrInvalidDelta(err) { - return nil, nil, DeltaUpdate{}, graph.Stack(ctx, err) - } - - resetDelta = true - } - - builder := service.Client().UsersById(user).ContactFoldersById(directoryID).Contacts().Delta() - pgr := &contactPager{service, builder, options} - - if len(os.Getenv("CORSO_URL_LOGGING")) > 0 { - gri, err := builder.ToGetRequestInformation(ctx, options) - if err != nil { - logger.CtxErr(ctx, err).Error("getting builder info") - } else { - logger.Ctx(ctx). 
- Infow("builder path-parameters", "path_parameters", gri.PathParameters) - } - } - - added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) + deltaPager, err := NewContactDeltaPager(ctx, service, user, directoryID, oldDelta, immutableIDs) if err != nil { - return nil, nil, DeltaUpdate{}, err + return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager") } - return added, removed, DeltaUpdate{deltaURL, resetDelta}, nil + return getAddedAndRemovedItemIDs(ctx, service, pager, deltaPager, oldDelta, canMakeDeltaQueries) } // --------------------------------------------------------------------------- diff --git a/src/internal/connector/exchange/api/contacts_test.go b/src/pkg/services/m365/api/contacts_test.go similarity index 100% rename from src/internal/connector/exchange/api/contacts_test.go rename to src/pkg/services/m365/api/contacts_test.go diff --git a/src/internal/connector/onedrive/api/drive.go b/src/pkg/services/m365/api/drive.go similarity index 60% rename from src/internal/connector/onedrive/api/drive.go rename to src/pkg/services/m365/api/drive.go index f72cdf10f..33c357033 100644 --- a/src/internal/connector/onedrive/api/drive.go +++ b/src/pkg/services/m365/api/drive.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph/api" + onedrive "github.com/alcionai/corso/src/internal/connector/onedrive/consts" "github.com/alcionai/corso/src/pkg/logger" ) @@ -34,8 +35,8 @@ const pageSize = int32(999) type driveItemPager struct { gs graph.Servicer driveID string - builder *drives.ItemRootDeltaRequestBuilder - options *drives.ItemRootDeltaRequestBuilderGetRequestConfiguration + builder *drives.ItemItemsItemDeltaRequestBuilder + options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration } func NewItemPager( @@ -54,9 +55,9 @@ func NewItemPager( } headers.Add("Prefer", strings.Join(preferHeaderItems, ",")) - requestConfig := &drives.ItemRootDeltaRequestBuilderGetRequestConfiguration{ + requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{ Headers: headers, - QueryParameters: &drives.ItemRootDeltaRequestBuilderGetQueryParameters{ + QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{ Top: &pageCount, Select: fields, }, @@ -66,11 +67,14 @@ func NewItemPager( gs: gs, driveID: driveID, options: requestConfig, - builder: gs.Client().DrivesById(driveID).Root().Delta(), + builder: gs.Client(). + Drives(). + ByDriveId(driveID). + Items().ByDriveItemId(onedrive.RootID).Delta(), } if len(link) > 0 { - res.builder = drives.NewItemRootDeltaRequestBuilder(link, gs.Adapter()) + res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, gs.Adapter()) } return res @@ -91,11 +95,16 @@ func (p *driveItemPager) GetPage(ctx context.Context) (api.DeltaPageLinker, erro } func (p *driveItemPager) SetNext(link string) { - p.builder = drives.NewItemRootDeltaRequestBuilder(link, p.gs.Adapter()) + p.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, p.gs.Adapter()) } func (p *driveItemPager) Reset() { - p.builder = p.gs.Client().DrivesById(p.driveID).Root().Delta() + p.builder = p.gs.Client(). + Drives(). + ByDriveId(p.driveID). + Items(). + ByDriveItemId(onedrive.RootID). 
+ Delta() } func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) { @@ -103,6 +112,7 @@ func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable } type userDrivePager struct { + userID string gs graph.Servicer builder *users.ItemDrivesRequestBuilder options *users.ItemDrivesRequestBuilderGetRequestConfiguration @@ -120,25 +130,42 @@ func NewUserDrivePager( } res := &userDrivePager{ + userID: userID, gs: gs, options: requestConfig, - builder: gs.Client().UsersById(userID).Drives(), + builder: gs.Client().Users().ByUserId(userID).Drives(), } return res } +type nopUserDrivePageLinker struct { + drive models.Driveable +} + +func (nl nopUserDrivePageLinker) GetOdataNextLink() *string { return nil } + func (p *userDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) { var ( resp api.PageLinker err error ) - resp, err = p.builder.Get(ctx, p.options) + d, err := p.gs.Client().Users().ByUserId(p.userID).Drive().Get(ctx, nil) if err != nil { return nil, graph.Stack(ctx, err) } + resp = &nopUserDrivePageLinker{drive: d} + + // TODO(keepers): turn back on when we can separate drive enumeration + // from default drive lookup. + + // resp, err = p.builder.Get(ctx, p.options) + // if err != nil { + // return nil, graph.Stack(ctx, err) + // } + return resp, nil } @@ -147,7 +174,17 @@ func (p *userDrivePager) SetNext(link string) { } func (p *userDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) { - return getValues[models.Driveable](l) + nl, ok := l.(*nopUserDrivePageLinker) + if !ok || nl == nil { + return nil, clues.New(fmt.Sprintf("improper page linker struct for user drives: %T", l)) + } + + // TODO(keepers): turn back on when we can separate drive enumeration + // from default drive lookup. + + // return getValues[models.Driveable](l) + + return []models.Driveable{nl.drive}, nil } type siteDrivePager struct { @@ -175,7 +212,7 @@ func NewSiteDrivePager( res := &siteDrivePager{ gs: gs, options: requestConfig, - builder: gs.Client().SitesById(siteID).Drives(), + builder: gs.Client().Sites().BySiteId(siteID).Drives(), } return res @@ -279,8 +316,10 @@ func GetDriveItem( driveID, itemID string, ) (models.DriveItemable, error) { di, err := srv.Client(). - DrivesById(driveID). - ItemsById(itemID). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(itemID). Get(ctx, nil) if err != nil { return nil, graph.Wrap(ctx, err, "getting item") @@ -296,8 +335,10 @@ func GetItemPermission( ) (models.PermissionCollectionResponseable, error) { perm, err := service. Client(). - DrivesById(driveID). - ItemsById(itemID). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(itemID). Permissions(). Get(ctx, nil) if err != nil { @@ -307,19 +348,106 @@ func GetItemPermission( return perm, nil } -func GetDriveByID( +func GetUsersDrive( ctx context.Context, srv graph.Servicer, - userID string, + user string, ) (models.Driveable, error) { - //revive:enable:context-as-argument d, err := srv.Client(). - UsersById(userID). + Users(). + ByUserId(user). Drive(). Get(ctx, nil) if err != nil { - return nil, graph.Wrap(ctx, err, "getting drive") + return nil, graph.Wrap(ctx, err, "getting user's drive") } return d, nil } + +func GetSitesDefaultDrive( + ctx context.Context, + srv graph.Servicer, + site string, +) (models.Driveable, error) { + d, err := srv.Client(). + Sites(). + BySiteId(site). + Drive(). 
+ Get(ctx, nil) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting site's drive") + } + + return d, nil +} + +func GetDriveRoot( + ctx context.Context, + srv graph.Servicer, + driveID string, +) (models.DriveItemable, error) { + root, err := srv.Client().Drives().ByDriveId(driveID).Root().Get(ctx, nil) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting drive root") + } + + return root, nil +} + +const itemByPathRawURLFmt = "https://graph.microsoft.com/v1.0/drives/%s/items/%s:/%s" + +var ErrFolderNotFound = clues.New("folder not found") + +// GetFolderByName will lookup the specified folder by name within the parentFolderID folder. +func GetFolderByName( + ctx context.Context, + service graph.Servicer, + driveID, parentFolderID, folder string, +) (models.DriveItemable, error) { + // The `Children().Get()` API doesn't yet support $filter, so using that to find a folder + // will be sub-optimal. + // Instead, we leverage OneDrive path-based addressing - + // https://learn.microsoft.com/en-us/graph/onedrive-addressing-driveitems#path-based-addressing + // - which allows us to lookup an item by its path relative to the parent ID + rawURL := fmt.Sprintf(itemByPathRawURLFmt, driveID, parentFolderID, folder) + builder := drives.NewItemItemsDriveItemItemRequestBuilder(rawURL, service.Adapter()) + + foundItem, err := builder.Get(ctx, nil) + if err != nil { + if graph.IsErrDeletedInFlight(err) { + return nil, graph.Stack(ctx, clues.Stack(ErrFolderNotFound, err)) + } + + return nil, graph.Wrap(ctx, err, "getting folder") + } + + // Check if the item found is a folder, fail the call if not + if foundItem.GetFolder() == nil { + return nil, graph.Wrap(ctx, ErrFolderNotFound, "item is not a folder") + } + + return foundItem, nil +} + +func PostItemPermissionUpdate( + ctx context.Context, + service graph.Servicer, + driveID, itemID string, + body *drives.ItemItemsItemInvitePostRequestBody, +) (drives.ItemItemsItemInviteResponseable, error) { + ctx = graph.ConsumeNTokens(ctx, graph.PermissionsLC) + + itm, err := service.Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(itemID). + Invite(). 
+ Post(ctx, body, nil) + if err != nil { + return nil, graph.Wrap(ctx, err, "posting permissions") + } + + return itm, nil +} diff --git a/src/internal/connector/onedrive/api/drive_test.go b/src/pkg/services/m365/api/drive_test.go similarity index 95% rename from src/internal/connector/onedrive/api/drive_test.go rename to src/pkg/services/m365/api/drive_test.go index 26d189d9d..154615a1b 100644 --- a/src/internal/connector/onedrive/api/drive_test.go +++ b/src/pkg/services/m365/api/drive_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive/api" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type OneDriveAPISuite struct { diff --git a/src/internal/connector/exchange/api/events.go b/src/pkg/services/m365/api/events.go similarity index 67% rename from src/internal/connector/exchange/api/events.go rename to src/pkg/services/m365/api/events.go index 84a6fa6ce..3d84a9c53 100644 --- a/src/internal/connector/exchange/api/events.go +++ b/src/pkg/services/m365/api/events.go @@ -3,7 +3,6 @@ package api import ( "context" "fmt" - "os" "time" "github.com/alcionai/clues" @@ -12,13 +11,12 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/users" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph/api" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -48,7 +46,7 @@ func (c Events) CreateCalendar( requestbody := models.NewCalendar() requestbody.SetName(&calendarName) - mdl, err := c.Stable.Client().UsersById(user).Calendars().Post(ctx, requestbody, nil) + mdl, err := c.Stable.Client().Users().ByUserId(user).Calendars().Post(ctx, requestbody, nil) if err != nil { return nil, graph.Wrap(ctx, err, "creating calendar") } @@ -69,7 +67,7 @@ func (c Events) DeleteContainer( return graph.Stack(ctx, err) } - err = srv.Client().UsersById(user).CalendarsById(calendarID).Delete(ctx, nil) + err = srv.Client().Users().ByUserId(user).Calendars().ByCalendarId(calendarID).Delete(ctx, nil) if err != nil { return graph.Stack(ctx, err) } @@ -81,7 +79,7 @@ func (c Events) GetContainerByID( ctx context.Context, userID, containerID string, ) (graph.Container, error) { - service, err := c.service() + service, err := c.Service() if err != nil { return nil, graph.Stack(ctx, err) } @@ -91,7 +89,7 @@ func (c Events) GetContainerByID( return nil, graph.Wrap(ctx, err, "setting event calendar options") } - cal, err := service.Client().UsersById(userID).CalendarsById(containerID).Get(ctx, ofc) + cal, err := service.Client().Users().ByUserId(userID).Calendars().ByCalendarId(containerID).Get(ctx, ofc) if err != nil { return nil, graph.Stack(ctx, err).WithClues(ctx) } @@ -99,6 +97,45 @@ func (c Events) GetContainerByID( return graph.CalendarDisplayable{Calendarable: cal}, nil } +// GetContainerByName fetches a calendar by name +func (c Events) GetContainerByName( + ctx context.Context, + userID, name string, +) (models.Calendarable, error) { + filter := fmt.Sprintf("name eq '%s'", name) + options := 
&users.ItemCalendarsRequestBuilderGetRequestConfiguration{ + QueryParameters: &users.ItemCalendarsRequestBuilderGetQueryParameters{ + Filter: &filter, + }, + } + + ctx = clues.Add(ctx, "calendar_name", name) + + resp, err := c.Stable.Client().Users().ByUserId(userID).Calendars().Get(ctx, options) + if err != nil { + return nil, graph.Stack(ctx, err).WithClues(ctx) + } + + // We only allow the api to match one calendar with provided name. + // Return an error if multiple calendars exist (unlikely) or if no calendar + // is found. + if len(resp.GetValue()) != 1 { + err = clues.New("unexpected number of calendars returned"). + With("returned_calendar_count", len(resp.GetValue())) + return nil, err + } + + // Sanity check ID and name + cal := resp.GetValue()[0] + cd := CalendarDisplayable{Calendarable: cal} + + if err := checkIDAndName(cd); err != nil { + return nil, err + } + + return cal, nil +} + // GetItem retrieves an Eventable item. func (c Events) GetItem( ctx context.Context, @@ -115,7 +152,7 @@ func (c Events) GetItem( } ) - event, err = c.Stable.Client().UsersById(user).EventsById(itemID).Get(ctx, itemOpts) + event, err = c.Stable.Client().Users().ByUserId(user).Events().ByEventId(itemID).Get(ctx, itemOpts) if err != nil { return nil, nil, graph.Stack(ctx, err) } @@ -130,8 +167,10 @@ func (c Events) GetItem( attached, err := c.LargeItem. Client(). - UsersById(user). - EventsById(itemID). + Users(). + ByUserId(user). + Events(). + ByEventId(itemID). Attachments(). Get(ctx, options) if err != nil { @@ -155,7 +194,7 @@ func (c Events) EnumerateContainers( fn func(graph.CacheFolder) error, errs *fault.Bus, ) error { - service, err := c.service() + service, err := c.Service() if err != nil { return graph.Stack(ctx, err) } @@ -166,7 +205,7 @@ func (c Events) EnumerateContainers( } el := errs.Local() - builder := service.Client().UsersById(userID).Calendars() + builder := service.Client().Users().ByUserId(userID).Calendars() for { if el.Failure() != nil { @@ -215,20 +254,35 @@ func (c Events) EnumerateContainers( return el.Failure() } +const ( + eventBetaDeltaURLTemplate = "https://graph.microsoft.com/beta/users/%s/calendars/%s/events/delta" +) + // --------------------------------------------------------------------------- // item pager // --------------------------------------------------------------------------- var _ itemPager = &eventPager{} -const ( - eventBetaDeltaURLTemplate = "https://graph.microsoft.com/beta/users/%s/calendars/%s/events/delta" -) - type eventPager struct { gs graph.Servicer - builder *users.ItemCalendarsItemEventsDeltaRequestBuilder - options *users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration + builder *users.ItemCalendarsItemEventsRequestBuilder + options *users.ItemCalendarsItemEventsRequestBuilderGetRequestConfiguration +} + +func NewEventPager( + ctx context.Context, + gs graph.Servicer, + user, calendarID string, + immutableIDs bool, +) (itemPager, error) { + options := &users.ItemCalendarsItemEventsRequestBuilderGetRequestConfiguration{ + Headers: buildPreferHeaders(true, immutableIDs), + } + + builder := gs.Client().Users().ByUserId(user).Calendars().ByCalendarId(calendarID).Events() + + return &eventPager{gs, builder, options}, nil } func (p *eventPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { @@ -237,58 +291,62 @@ func (p *eventPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { return nil, graph.Stack(ctx, err) } - return resp, nil + return api.EmptyDeltaLinker[models.Eventable]{PageLinkValuer: 
resp}, nil } func (p *eventPager) setNext(nextLink string) { - p.builder = users.NewItemCalendarsItemEventsDeltaRequestBuilder(nextLink, p.gs.Adapter()) + p.builder = users.NewItemCalendarsItemEventsRequestBuilder(nextLink, p.gs.Adapter()) } -func (p *eventPager) valuesIn(pl api.DeltaPageLinker) ([]getIDAndAddtler, error) { +// non delta pagers don't need reset +func (p *eventPager) reset(context.Context) {} + +func (p *eventPager) valuesIn(pl api.PageLinker) ([]getIDAndAddtler, error) { return toValues[models.Eventable](pl) } -func (c Events) GetAddedAndRemovedItemIDs( +// --------------------------------------------------------------------------- +// delta item pager +// --------------------------------------------------------------------------- + +var _ itemPager = &eventDeltaPager{} + +type eventDeltaPager struct { + gs graph.Servicer + user string + calendarID string + builder *users.ItemCalendarsItemEventsDeltaRequestBuilder + options *users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration +} + +func NewEventDeltaPager( ctx context.Context, - user, calendarID, oldDelta string, + gs graph.Servicer, + user, calendarID, deltaURL string, immutableIDs bool, -) ([]string, []string, DeltaUpdate, error) { - service, err := c.service() - if err != nil { - return nil, nil, DeltaUpdate{}, err +) (itemPager, error) { + options := &users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration{ + Headers: buildPreferHeaders(true, immutableIDs), } - var ( - resetDelta bool - opts = &users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration{ - Headers: buildPreferHeaders(true, immutableIDs), - } - ) + var builder *users.ItemCalendarsItemEventsDeltaRequestBuilder - ctx = clues.Add( - ctx, - "container_id", calendarID) - - if len(oldDelta) > 0 { - var ( - builder = users.NewItemCalendarsItemEventsDeltaRequestBuilder(oldDelta, service.Adapter()) - pgr = &eventPager{service, builder, opts} - ) - - added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) - // note: happy path, not the error condition - if err == nil { - return added, removed, DeltaUpdate{deltaURL, false}, nil - } - // only return on error if it is NOT a delta issue. - // on bad deltas we retry the call with the regular builder - if !graph.IsErrInvalidDelta(err) { - return nil, nil, DeltaUpdate{}, graph.Stack(ctx, err) - } - - resetDelta = true + if deltaURL == "" { + builder = getEventDeltaBuilder(ctx, gs, user, calendarID, options) + } else { + builder = users.NewItemCalendarsItemEventsDeltaRequestBuilder(deltaURL, gs.Adapter()) } + return &eventDeltaPager{gs, user, calendarID, builder, options}, nil +} + +func getEventDeltaBuilder( + ctx context.Context, + gs graph.Servicer, + user string, + calendarID string, + options *users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration, +) *users.ItemCalendarsItemEventsDeltaRequestBuilder { // Graph SDK only supports delta queries against events on the beta version, so we're // manufacturing use of the beta version url to make the call instead. // See: https://learn.microsoft.com/ko-kr/graph/api/event-delta?view=graph-rest-beta&tabs=http @@ -298,26 +356,58 @@ func (c Events) GetAddedAndRemovedItemIDs( // Likewise, the NextLink and DeltaLink odata tags carry our hack forward, so the rest of the code // works as intended (until, at least, we want to _not_ call the beta anymore). 
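// With that template, the manufactured URL takes the shape (placeholder IDs, not real values):
//   https://graph.microsoft.com/beta/users/{userID}/calendars/{calendarID}/events/delta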
rawURL := fmt.Sprintf(eventBetaDeltaURLTemplate, user, calendarID) - builder := users.NewItemCalendarsItemEventsDeltaRequestBuilder(rawURL, service.Adapter()) - pgr := &eventPager{service, builder, opts} + builder := users.NewItemCalendarsItemEventsDeltaRequestBuilder(rawURL, gs.Adapter()) - if len(os.Getenv("CORSO_URL_LOGGING")) > 0 { - gri, err := builder.ToGetRequestInformation(ctx, nil) - if err != nil { - logger.CtxErr(ctx, err).Error("getting builder info") - } else { - logger.Ctx(ctx). - Infow("builder path-parameters", "path_parameters", gri.PathParameters) - } + return builder +} + +func (p *eventDeltaPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { + resp, err := p.builder.Get(ctx, p.options) + if err != nil { + return nil, graph.Stack(ctx, err) } - added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) + return resp, nil +} + +func (p *eventDeltaPager) setNext(nextLink string) { + p.builder = users.NewItemCalendarsItemEventsDeltaRequestBuilder(nextLink, p.gs.Adapter()) +} + +func (p *eventDeltaPager) reset(ctx context.Context) { + p.builder = getEventDeltaBuilder(ctx, p.gs, p.user, p.calendarID, p.options) +} + +func (p *eventDeltaPager) valuesIn(pl api.PageLinker) ([]getIDAndAddtler, error) { + return toValues[models.Eventable](pl) +} + +func (c Events) GetAddedAndRemovedItemIDs( + ctx context.Context, + user, calendarID, oldDelta string, + immutableIDs bool, + canMakeDeltaQueries bool, +) ([]string, []string, DeltaUpdate, error) { + service, err := c.Service() if err != nil { return nil, nil, DeltaUpdate{}, err } - // Events don't have a delta endpoint so just return an empty string. - return added, removed, DeltaUpdate{deltaURL, resetDelta}, nil + ctx = clues.Add( + ctx, + "container_id", calendarID) + + pager, err := NewEventPager(ctx, service, user, calendarID, immutableIDs) + if err != nil { + return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating non-delta pager") + } + + deltaPager, err := NewEventDeltaPager(ctx, service, user, calendarID, oldDelta, immutableIDs) + if err != nil { + return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager") + } + + return getAddedAndRemovedItemIDs(ctx, service, pager, deltaPager, oldDelta, canMakeDeltaQueries) } // --------------------------------------------------------------------------- @@ -407,7 +497,7 @@ func EventInfo(evt models.Eventable) *details.ExchangeInfo { // DateTime is not: time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) startTime := ptr.Val(evt.GetStart().GetDateTime()) + "Z" - output, err := common.ParseTime(startTime) + output, err := dttm.ParseTime(startTime) if err == nil { start = output } @@ -418,7 +508,7 @@ func EventInfo(evt models.Eventable) *details.ExchangeInfo { // DateTime is not: time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) endTime := ptr.Val(evt.GetEnd().GetDateTime()) + "Z" - output, err := common.ParseTime(endTime) + output, err := dttm.ParseTime(endTime) if err == nil { end = output } diff --git a/src/internal/connector/exchange/api/events_test.go b/src/pkg/services/m365/api/events_test.go similarity index 96% rename from src/internal/connector/exchange/api/events_test.go rename to src/pkg/services/m365/api/events_test.go index a8cf1270b..6939c67cf 100644 --- a/src/internal/connector/exchange/api/events_test.go +++ b/src/pkg/services/m365/api/events_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/common" + 
"github.com/alcionai/corso/src/internal/common/dttm" exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/tester" @@ -31,7 +31,7 @@ func (suite *EventsAPIUnitSuite) TestEventInfo() { // Exchange stores start/end times in UTC and the below compares hours // directly so we need to "normalize" the timezone here. initial := time.Now().UTC() - now := common.FormatTimeWith(initial, common.M365DateTimeTimeZone) + now := dttm.FormatTo(initial, dttm.M365DateTimeTimeZone) suite.T().Logf("Initial: %v\nFormatted: %v\n", initial, now) @@ -87,7 +87,7 @@ func (suite *EventsAPIUnitSuite) TestEventInfo() { startTime.SetDateTime(&now) event.SetStart(startTime) - nowp30m := common.FormatTimeWith(initial.Add(30*time.Minute), common.M365DateTimeTimeZone) + nowp30m := dttm.FormatTo(initial.Add(30*time.Minute), dttm.M365DateTimeTimeZone) endTime.SetDateTime(&nowp30m) event.SetEnd(endTime) diff --git a/src/pkg/services/m365/api/exchange_common.go b/src/pkg/services/m365/api/exchange_common.go new file mode 100644 index 000000000..7f4f6afe2 --- /dev/null +++ b/src/pkg/services/m365/api/exchange_common.go @@ -0,0 +1,43 @@ +package api + +import ( + "strings" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/connector/graph" +) + +// checkIDAndName is a helper function to ensure that +// the ID and name pointers are set prior to being called. +func checkIDAndName(c graph.Container) error { + id := ptr.Val(c.GetId()) + if len(id) == 0 { + return clues.New("container missing ID") + } + + dn := ptr.Val(c.GetDisplayName()) + if len(dn) == 0 { + return clues.New("container missing display name").With("container_id", id) + } + + return nil +} + +func HasAttachments(body models.ItemBodyable) bool { + if body == nil { + return false + } + + if ct, ok := ptr.ValOK(body.GetContentType()); !ok || ct == models.TEXT_BODYTYPE { + return false + } + + if body, ok := ptr.ValOK(body.GetContent()); !ok || len(body) == 0 { + return false + } + + return strings.Contains(ptr.Val(body.GetContent()), "src=\"cid:") +} diff --git a/src/internal/connector/exchange/api/mail.go b/src/pkg/services/m365/api/mail.go similarity index 58% rename from src/internal/connector/exchange/api/mail.go rename to src/pkg/services/m365/api/mail.go index b011921b5..2abb889c0 100644 --- a/src/internal/connector/exchange/api/mail.go +++ b/src/pkg/services/m365/api/mail.go @@ -3,7 +3,6 @@ package api import ( "context" "fmt" - "os" "github.com/alcionai/clues" "github.com/microsoft/kiota-abstractions-go/serialization" @@ -20,6 +19,10 @@ import ( "github.com/alcionai/corso/src/pkg/selectors" ) +const ( + mailFoldersBetaURLTemplate = "https://graph.microsoft.com/beta/users/%s/mailFolders" +) + // --------------------------------------------------------------------------- // controller // --------------------------------------------------------------------------- @@ -48,7 +51,7 @@ func (c Mail) CreateMailFolder( requestBody.SetDisplayName(&folder) requestBody.SetIsHidden(&isHidden) - mdl, err := c.Stable.Client().UsersById(user).MailFolders().Post(ctx, requestBody, nil) + mdl, err := c.Stable.Client().Users().ByUserId(user).MailFolders().Post(ctx, requestBody, nil) if err != nil { return nil, graph.Wrap(ctx, err, "creating mail folder") } @@ -60,7 +63,7 @@ func (c Mail) CreateMailFolderWithParent( ctx context.Context, 
user, folder, parentID string, ) (models.MailFolderable, error) { - service, err := c.service() + service, err := c.Service() if err != nil { return nil, graph.Stack(ctx, err) } @@ -72,8 +75,10 @@ func (c Mail) CreateMailFolderWithParent( mdl, err := service. Client(). - UsersById(user). - MailFoldersById(parentID). + Users(). + ByUserId(user). + MailFolders(). + ByMailFolderId(parentID). ChildFolders(). Post(ctx, requestBody, nil) if err != nil { @@ -96,7 +101,12 @@ func (c Mail) DeleteContainer( return graph.Stack(ctx, err) } - err = srv.Client().UsersById(user).MailFoldersById(folderID).Delete(ctx, nil) + err = srv.Client(). + Users(). + ByUserId(user). + MailFolders(). + ByMailFolderId(folderID). + Delete(ctx, nil) if err != nil { return graph.Stack(ctx, err) } @@ -108,7 +118,7 @@ func (c Mail) GetContainerByID( ctx context.Context, userID, dirID string, ) (graph.Container, error) { - service, err := c.service() + service, err := c.Service() if err != nil { return nil, graph.Stack(ctx, err) } @@ -118,7 +128,12 @@ func (c Mail) GetContainerByID( return nil, graph.Wrap(ctx, err, "setting mail folder options") } - resp, err := service.Client().UsersById(userID).MailFoldersById(dirID).Get(ctx, ofmf) + resp, err := service.Client(). + Users(). + ByUserId(userID). + MailFolders(). + ByMailFolderId(dirID). + Get(ctx, ofmf) if err != nil { return nil, graph.Stack(ctx, err) } @@ -134,19 +149,31 @@ func (c Mail) GetItem( immutableIDs bool, errs *fault.Bus, ) (serialization.Parsable, *details.ExchangeInfo, error) { + var ( + size int64 + mailBody models.ItemBodyable + ) // Will need to be adjusted if attachments start allowing paging. headers := buildPreferHeaders(false, immutableIDs) itemOpts := &users.ItemMessagesMessageItemRequestBuilderGetRequestConfiguration{ Headers: headers, } - mail, err := c.Stable.Client().UsersById(user).MessagesById(itemID).Get(ctx, itemOpts) + mail, err := c.Stable.Client().Users().ByUserId(user).Messages().ByMessageId(itemID).Get(ctx, itemOpts) if err != nil { return nil, nil, graph.Stack(ctx, err) } - if !ptr.Val(mail.GetHasAttachments()) && !HasAttachments(mail.GetBody()) { - return mail, MailInfo(mail), nil + mailBody = mail.GetBody() + if mailBody != nil { + content := ptr.Val(mailBody.GetContent()) + if len(content) > 0 { + size = int64(len(content)) + } + } + + if !ptr.Val(mail.GetHasAttachments()) && !HasAttachments(mailBody) { + return mail, MailInfo(mail, size), nil } options := &users.ItemMessagesItemAttachmentsRequestBuilderGetRequestConfiguration{ @@ -158,13 +185,21 @@ func (c Mail) GetItem( attached, err := c.LargeItem. Client(). - UsersById(user). - MessagesById(itemID). + Users(). + ByUserId(user). + Messages(). + ByMessageId(itemID). Attachments(). Get(ctx, options) if err == nil { + for _, a := range attached.GetValue() { + attachSize := ptr.Val(a.GetSize()) + size += int64(attachSize) + } + mail.SetAttachments(attached.GetValue()) - return mail, MailInfo(mail), nil + + return mail, MailInfo(mail, size), nil } // A failure can be caused by having a lot of attachments as @@ -183,8 +218,10 @@ func (c Mail) GetItem( attachments, err := c.LargeItem. Client(). - UsersById(user). - MessagesById(itemID). + Users(). + ByUserId(user). + Messages(). + ByMessageId(itemID). Attachments(). Get(ctx, options) if err != nil { @@ -203,9 +240,12 @@ func (c Mail) GetItem( att, err := c.Stable. Client(). - UsersById(user). - MessagesById(itemID). - AttachmentsById(ptr.Val(a.GetId())). + Users(). + ByUserId(user). + Messages(). + ByMessageId(itemID). + Attachments().
+ ByAttachmentId(ptr.Val(a.GetId())). Get(ctx, options) if err != nil { return nil, nil, @@ -214,11 +254,50 @@ func (c Mail) GetItem( } atts = append(atts, att) + attachSize := ptr.Val(a.GetSize()) + size += int64(attachSize) } mail.SetAttachments(atts) - return mail, MailInfo(mail), nil + return mail, MailInfo(mail, size), nil +} + +type mailFolderPager struct { + service graph.Servicer + builder *users.ItemMailFoldersRequestBuilder +} + +func NewMailFolderPager(service graph.Servicer, user string) mailFolderPager { + // The v1.0 non-delta /mailFolders endpoint does not return any of the nested folders + rawURL := fmt.Sprintf(mailFoldersBetaURLTemplate, user) + builder := users.NewItemMailFoldersRequestBuilder(rawURL, service.Adapter()) + + return mailFolderPager{service, builder} +} + +func (p *mailFolderPager) getPage(ctx context.Context) (api.PageLinker, error) { + page, err := p.builder.Get(ctx, nil) + if err != nil { + return nil, graph.Stack(ctx, err) + } + + return page, nil +} + +func (p *mailFolderPager) setNext(nextLink string) { + p.builder = users.NewItemMailFoldersRequestBuilder(nextLink, p.service.Adapter()) +} + +func (p *mailFolderPager) valuesIn(pl api.PageLinker) ([]models.MailFolderable, error) { + // Ideally this would be `users.ItemMailFoldersResponseable`, but that type + // does not exist; the stable endpoint returns a different result type + page, ok := pl.(models.MailFolderCollectionResponseable) + if !ok { + return nil, clues.New("converting to ItemMailFoldersResponseable") + } + + return page.GetValue(), nil } // EnumerateContainers iterates through all of the user's current @@ -232,28 +311,31 @@ func (c Mail) EnumerateContainers( fn func(graph.CacheFolder) error, errs *fault.Bus, ) error { - service, err := c.service() + service, err := c.Service() if err != nil { return graph.Stack(ctx, err) } el := errs.Local() - builder := service.Client(). - UsersById(userID). - MailFolders().
- Delta() + + pgr := NewMailFolderPager(service, userID) for { if el.Failure() != nil { break } - resp, err := builder.Get(ctx, nil) + page, err := pgr.getPage(ctx) if err != nil { return graph.Stack(ctx, err) } - for _, v := range resp.GetValue() { + resp, err := pgr.valuesIn(page) + if err != nil { + return graph.Stack(ctx, err) + } + + for _, v := range resp { if el.Failure() != nil { break } @@ -270,12 +352,12 @@ func (c Mail) EnumerateContainers( } } - link, ok := ptr.ValOK(resp.GetOdataNextLink()) + link, ok := ptr.ValOK(page.GetOdataNextLink()) if !ok { break } - builder = users.NewItemMailFoldersDeltaRequestBuilder(link, service.Adapter()) + pgr.setNext(link) } return el.Failure() @@ -289,8 +371,42 @@ var _ itemPager = &mailPager{} type mailPager struct { gs graph.Servicer - builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder - options *users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration + builder *users.ItemMailFoldersItemMessagesRequestBuilder + options *users.ItemMailFoldersItemMessagesRequestBuilderGetRequestConfiguration +} + +func NewMailPager( + ctx context.Context, + gs graph.Servicer, + user, directoryID string, + immutableIDs bool, +) (itemPager, error) { + selecting, err := buildOptions([]string{"isRead"}, fieldsForMessages) + if err != nil { + return nil, err + } + + requestParameters := &users.ItemMailFoldersItemMessagesRequestBuilderGetQueryParameters{ + Select: selecting, + } + + options := &users.ItemMailFoldersItemMessagesRequestBuilderGetRequestConfiguration{ + QueryParameters: requestParameters, + Headers: buildPreferHeaders(true, immutableIDs), + } + + if err != nil { + return &mailPager{}, err + } + + builder := gs.Client(). + Users(). + ByUserId(user). + MailFolders(). + ByMailFolderId(directoryID). + Messages() + + return &mailPager{gs, builder, options}, nil } func (p *mailPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { @@ -299,14 +415,111 @@ func (p *mailPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { return nil, graph.Stack(ctx, err) } - return page, nil + return api.EmptyDeltaLinker[models.Messageable]{PageLinkValuer: page}, nil } func (p *mailPager) setNext(nextLink string) { + p.builder = users.NewItemMailFoldersItemMessagesRequestBuilder(nextLink, p.gs.Adapter()) +} + +// non delta pagers don't have reset +func (p *mailPager) reset(context.Context) {} + +func (p *mailPager) valuesIn(pl api.PageLinker) ([]getIDAndAddtler, error) { + return toValues[models.Messageable](pl) +} + +// --------------------------------------------------------------------------- +// delta item pager +// --------------------------------------------------------------------------- + +var _ itemPager = &mailDeltaPager{} + +type mailDeltaPager struct { + gs graph.Servicer + user string + directoryID string + builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder + options *users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration +} + +func getMailDeltaBuilder( + ctx context.Context, + gs graph.Servicer, + user string, + directoryID string, + options *users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration, +) *users.ItemMailFoldersItemMessagesDeltaRequestBuilder { + builder := gs.Client(). + Users(). + ByUserId(user). + MailFolders(). + ByMailFolderId(directoryID). + Messages(). 
+ Delta() + + return builder +} + +func NewMailDeltaPager( + ctx context.Context, + gs graph.Servicer, + user, directoryID, oldDelta string, + immutableIDs bool, +) (itemPager, error) { + selecting, err := buildOptions([]string{"isRead"}, fieldsForMessages) + if err != nil { + return nil, err + } + + requestParameters := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{ + Select: selecting, + } + + options := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{ + QueryParameters: requestParameters, + Headers: buildPreferHeaders(true, immutableIDs), + } + + if err != nil { + return &mailDeltaPager{}, err + } + + var builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder + + if len(oldDelta) > 0 { + builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, gs.Adapter()) + } else { + builder = getMailDeltaBuilder(ctx, gs, user, directoryID, options) + } + + return &mailDeltaPager{gs, user, directoryID, builder, options}, nil +} + +func (p *mailDeltaPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { + page, err := p.builder.Get(ctx, p.options) + if err != nil { + return nil, graph.Stack(ctx, err) + } + + return page, nil +} + +func (p *mailDeltaPager) setNext(nextLink string) { p.builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(nextLink, p.gs.Adapter()) } -func (p *mailPager) valuesIn(pl api.DeltaPageLinker) ([]getIDAndAddtler, error) { +func (p *mailDeltaPager) reset(ctx context.Context) { + p.builder = p.gs.Client(). + Users(). + ByUserId(p.user). + MailFolders(). + ByMailFolderId(p.directoryID). + Messages(). + Delta() +} + +func (p *mailDeltaPager) valuesIn(pl api.PageLinker) ([]getIDAndAddtler, error) { return toValues[models.Messageable](pl) } @@ -314,69 +527,29 @@ func (c Mail) GetAddedAndRemovedItemIDs( ctx context.Context, user, directoryID, oldDelta string, immutableIDs bool, + canMakeDeltaQueries bool, ) ([]string, []string, DeltaUpdate, error) { - service, err := c.service() + service, err := c.Service() if err != nil { return nil, nil, DeltaUpdate{}, err } - var ( - deltaURL string - resetDelta bool - ) - ctx = clues.Add( ctx, "category", selectors.ExchangeMail, "container_id", directoryID) - options, err := optionsForFolderMessagesDelta([]string{"isRead"}, immutableIDs) + pager, err := NewMailPager(ctx, service, user, directoryID, immutableIDs) if err != nil { - return nil, - nil, - DeltaUpdate{}, - graph.Wrap(ctx, err, "setting contact folder options") + return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager") } - if len(oldDelta) > 0 { - var ( - builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, service.Adapter()) - pgr = &mailPager{service, builder, options} - ) - - added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) - // note: happy path, not the error condition - if err == nil { - return added, removed, DeltaUpdate{deltaURL, false}, err - } - // only return on error if it is NOT a delta issue. 
- // on bad deltas we retry the call with the regular builder - if !graph.IsErrInvalidDelta(err) { - return nil, nil, DeltaUpdate{}, err - } - - resetDelta = true - } - - builder := service.Client().UsersById(user).MailFoldersById(directoryID).Messages().Delta() - pgr := &mailPager{service, builder, options} - - if len(os.Getenv("CORSO_URL_LOGGING")) > 0 { - gri, err := builder.ToGetRequestInformation(ctx, options) - if err != nil { - logger.CtxErr(ctx, err).Error("getting builder info") - } else { - logger.Ctx(ctx). - Infow("builder path-parameters", "path_parameters", gri.PathParameters) - } - } - - added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) + deltaPager, err := NewMailDeltaPager(ctx, service, user, directoryID, oldDelta, immutableIDs) if err != nil { - return nil, nil, DeltaUpdate{}, err + return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager") } - return added, removed, DeltaUpdate{deltaURL, resetDelta}, nil + return getAddedAndRemovedItemIDs(ctx, service, pager, deltaPager, oldDelta, canMakeDeltaQueries) } // --------------------------------------------------------------------------- @@ -419,7 +592,7 @@ func (c Mail) Serialize( // Helpers // --------------------------------------------------------------------------- -func MailInfo(msg models.Messageable) *details.ExchangeInfo { +func MailInfo(msg models.Messageable, size int64) *details.ExchangeInfo { var ( sender = UnwrapEmailAddress(msg.GetSender()) subject = ptr.Val(msg.GetSubject()) @@ -444,6 +617,7 @@ func MailInfo(msg models.Messageable) *details.ExchangeInfo { Recipient: recipients, Subject: subject, Received: received, + Size: size, Created: created, Modified: ptr.OrNow(msg.GetLastModifiedDateTime()), } diff --git a/src/internal/connector/exchange/api/mail_test.go b/src/pkg/services/m365/api/mail_test.go similarity index 89% rename from src/internal/connector/exchange/api/mail_test.go rename to src/pkg/services/m365/api/mail_test.go index ad134041e..5d3641f25 100644 --- a/src/internal/connector/exchange/api/mail_test.go +++ b/src/pkg/services/m365/api/mail_test.go @@ -15,12 +15,12 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/connector/exchange/api" - "github.com/alcionai/corso/src/internal/connector/exchange/api/mock" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/services/m365/api" + "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) type MailAPIUnitSuite struct { @@ -152,12 +152,12 @@ func (suite *MailAPIUnitSuite) TestMailInfo() { for _, tt := range tests { suite.Run(tt.name, func() { msg, expected := tt.msgAndRP() - assert.Equal(suite.T(), expected, api.MailInfo(msg)) + assert.Equal(suite.T(), expected, api.MailInfo(msg, 0)) }) } } -type MailAPIE2ESuite struct { +type MailAPIIntgSuite struct { tester.Suite credentials account.M365Config ac api.Client @@ -165,9 +165,9 @@ type MailAPIE2ESuite struct { } // We do end up mocking the actual request, but creating the rest -// similar to E2E suite -func TestMailAPIE2ESuite(t *testing.T) { - suite.Run(t, &MailAPIE2ESuite{ +// similar to full integration tests. 
+func TestMailAPIIntgSuite(t *testing.T) { + suite.Run(t, &MailAPIIntgSuite{ Suite: tester.NewIntegrationSuite( t, [][]string{tester.M365AcctCredEnvs}, @@ -175,7 +175,7 @@ func TestMailAPIE2ESuite(t *testing.T) { }) } -func (suite *MailAPIE2ESuite) SetupSuite() { +func (suite *MailAPIIntgSuite) SetupSuite() { t := suite.T() a := tester.NewM365Account(t) @@ -205,7 +205,7 @@ func getJSONObject(t *testing.T, thing serialization.Parsable) map[string]interf return out } -func (suite *MailAPIE2ESuite) TestHugeAttachmentListDownload() { +func (suite *MailAPIIntgSuite) TestHugeAttachmentListDownload() { mid := "fake-message-id" aid := "fake-attachment-id" @@ -213,6 +213,7 @@ func (suite *MailAPIE2ESuite) TestHugeAttachmentListDownload() { name string setupf func() attachmentCount int + size int64 expect assert.ErrorAssertionFunc }{ { @@ -242,6 +243,9 @@ func (suite *MailAPIE2ESuite) TestHugeAttachmentListDownload() { atts := models.NewAttachmentCollectionResponse() aitem := models.NewAttachment() + + asize := int32(50) + aitem.SetSize(&asize) atts.SetValue([]models.Attachmentable{aitem}) gock.New("https://graph.microsoft.com"). @@ -250,6 +254,7 @@ func (suite *MailAPIE2ESuite) TestHugeAttachmentListDownload() { JSON(getJSONObject(suite.T(), atts)) }, attachmentCount: 1, + size: 50, expect: assert.NoError, }, { @@ -289,6 +294,7 @@ func (suite *MailAPIE2ESuite) TestHugeAttachmentListDownload() { JSON(getJSONObject(suite.T(), aitem)) }, attachmentCount: 1, + size: 200, expect: assert.NoError, }, { @@ -330,6 +336,7 @@ func (suite *MailAPIE2ESuite) TestHugeAttachmentListDownload() { } }, attachmentCount: 5, + size: 200, expect: assert.NoError, }, } @@ -348,8 +355,23 @@ func (suite *MailAPIE2ESuite) TestHugeAttachmentListDownload() { it, ok := item.(models.Messageable) require.True(suite.T(), ok, "convert to messageable") + var size int64 + mailBody := it.GetBody() + if mailBody != nil { + content := ptr.Val(mailBody.GetContent()) + if len(content) > 0 { + size = int64(len(content)) + } + } + + attachments := it.GetAttachments() + for _, attachment := range attachments { + size = +int64(*attachment.GetSize()) + } + assert.Equal(suite.T(), *it.GetId(), mid) - assert.Equal(suite.T(), tt.attachmentCount, len(it.GetAttachments()), "attachment count") + assert.Equal(suite.T(), tt.attachmentCount, len(attachments), "attachment count") + assert.Equal(suite.T(), tt.size, size, "mail size") assert.True(suite.T(), gock.IsDone(), "made all requests") }) } diff --git a/src/internal/connector/onedrive/api/mock/drive.go b/src/pkg/services/m365/api/mock/drive.go similarity index 100% rename from src/internal/connector/onedrive/api/mock/drive.go rename to src/pkg/services/m365/api/mock/drive.go diff --git a/src/internal/connector/exchange/api/mock/mail.go b/src/pkg/services/m365/api/mock/mail.go similarity index 51% rename from src/internal/connector/exchange/api/mock/mail.go rename to src/pkg/services/m365/api/mock/mail.go index 43f6f8d5c..b05cec1a4 100644 --- a/src/internal/connector/exchange/api/mock/mail.go +++ b/src/pkg/services/m365/api/mock/mail.go @@ -1,36 +1,21 @@ package mock import ( - "github.com/alcionai/clues" - - "github.com/alcionai/corso/src/internal/connector/exchange/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph/mock" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) -func NewService(creds account.M365Config, opts ...graph.Option) (*graph.Service, error) { - a, err := 
mock.CreateAdapter( - creds.AzureTenantID, - creds.AzureClientID, - creds.AzureClientSecret, - opts...) - if err != nil { - return nil, clues.Wrap(err, "generating graph adapter") - } - - return graph.NewService(a), nil -} - // NewClient produces a new exchange api client that can be // mocked using gock. func NewClient(creds account.M365Config) (api.Client, error) { - s, err := NewService(creds) + s, err := mock.NewService(creds) if err != nil { return api.Client{}, err } - li, err := NewService(creds, graph.NoTimeout()) + li, err := mock.NewService(creds, graph.NoTimeout()) if err != nil { return api.Client{}, err } diff --git a/src/internal/connector/exchange/api/options.go b/src/pkg/services/m365/api/options.go similarity index 84% rename from src/internal/connector/exchange/api/options.go rename to src/pkg/services/m365/api/options.go index 54f6bb1e7..ff506e7d5 100644 --- a/src/internal/connector/exchange/api/options.go +++ b/src/pkg/services/m365/api/options.go @@ -75,27 +75,6 @@ const ( // which reduces the overall latency of complex calls // ----------------------------------------------------------------------- -func optionsForFolderMessagesDelta( - moreOps []string, - immutableIDs bool, -) (*users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration, error) { - selecting, err := buildOptions(moreOps, fieldsForMessages) - if err != nil { - return nil, err - } - - requestParameters := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{ - Select: selecting, - } - - options := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{ - QueryParameters: requestParameters, - Headers: buildPreferHeaders(true, immutableIDs), - } - - return options, nil -} - // optionsForCalendars places allowed options for exchange.Calendar object // @param moreOps should reflect elements from fieldsForCalendars // @return is first call in Calendars().GetWithRequestConfigurationAndResponseHandler @@ -180,27 +159,6 @@ func optionsForMailFoldersItem( return options, nil } -func optionsForContactFoldersItemDelta( - moreOps []string, - immutableIDs bool, -) (*users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration, error) { - selecting, err := buildOptions(moreOps, fieldsForContacts) - if err != nil { - return nil, err - } - - requestParameters := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetQueryParameters{ - Select: selecting, - } - - options := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration{ - QueryParameters: requestParameters, - Headers: buildPreferHeaders(true, immutableIDs), - } - - return options, nil -} - // optionsForContactChildFolders builds a contacts child folders request. 
func optionsForContactChildFolders( moreOps []string, diff --git a/src/internal/connector/exchange/api/shared.go b/src/pkg/services/m365/api/shared.go similarity index 67% rename from src/internal/connector/exchange/api/shared.go rename to src/pkg/services/m365/api/shared.go index 0fbfa33f3..139eb5ecc 100644 --- a/src/internal/connector/exchange/api/shared.go +++ b/src/pkg/services/m365/api/shared.go @@ -3,7 +3,6 @@ package api import ( "context" "fmt" - "os" "github.com/alcionai/clues" @@ -18,9 +17,16 @@ import ( // --------------------------------------------------------------------------- type itemPager interface { + // getPage get a page with the specified options from graph getPage(context.Context) (api.DeltaPageLinker, error) + // setNext is used to pass in the next url got from graph setNext(string) - valuesIn(api.DeltaPageLinker) ([]getIDAndAddtler, error) + // reset is used to clear delta url in delta pagers. When + // reset is called, we reset the state(delta url) that we + // currently have and start a new delta query without the token. + reset(context.Context) + // valuesIn gets us the values in a page + valuesIn(api.PageLinker) ([]getIDAndAddtler, error) } type getIDAndAddtler interface { @@ -56,6 +62,54 @@ func toValues[T any](a any) ([]getIDAndAddtler, error) { return r, nil } +func getAddedAndRemovedItemIDs( + ctx context.Context, + service graph.Servicer, + pager itemPager, + deltaPager itemPager, + oldDelta string, + canMakeDeltaQueries bool, +) ([]string, []string, DeltaUpdate, error) { + var ( + pgr itemPager + resetDelta bool + ) + + if canMakeDeltaQueries { + pgr = deltaPager + resetDelta = len(oldDelta) == 0 + } else { + pgr = pager + resetDelta = true + } + + added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) + // note: happy path, not the error condition + if err == nil { + return added, removed, DeltaUpdate{deltaURL, resetDelta}, err + } + + // If we already tried with a non-delta url, we can return + if !canMakeDeltaQueries { + return nil, nil, DeltaUpdate{}, err + } + + // return error if invalid not delta error or oldDelta was empty + if !graph.IsErrInvalidDelta(err) || len(oldDelta) == 0 { + return nil, nil, DeltaUpdate{}, err + } + + // reset deltaPager + pgr.reset(ctx) + + added, removed, deltaURL, err = getItemsAddedAndRemovedFromContainer(ctx, pgr) + if err != nil { + return nil, nil, DeltaUpdate{}, err + } + + return added, removed, DeltaUpdate{deltaURL, true}, nil +} + // generic controller for retrieving all item ids in a container. func getItemsAddedAndRemovedFromContainer( ctx context.Context, @@ -104,19 +158,14 @@ func getItemsAddedAndRemovedFromContainer( } } - nextLink, delta := api.NextAndDeltaLink(resp) - if len(os.Getenv("CORSO_URL_LOGGING")) > 0 { - if !api.IsNextLinkValid(nextLink) || api.IsNextLinkValid(delta) { - logger.Ctx(ctx).Infof("Received invalid link from M365:\nNext Link: %s\nDelta Link: %s\n", nextLink, delta) - } - } + nextLink, deltaLink := api.NextAndDeltaLink(resp) // the deltaLink is kind of like a cursor for overall data state. // once we run through pages of nextLinks, the last query will // produce a deltaLink instead (if supported), which we'll use on // the next backup to only get the changes since this run. - if len(delta) > 0 { - deltaURL = delta + if len(deltaLink) > 0 { + deltaURL = deltaLink } // the nextLink is our page cursor within this query. 
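For readers following the shared paging helpers above, a minimal, self-contained sketch of the nextLink/deltaLink cursor behavior may help. It uses simplified stand-in types rather than the real `itemPager` or `DeltaPageLinker` interfaces, and is an illustration of the loop described in the comments, not the actual implementation:

```go
package example

import "context"

// Simplified stand-ins for a Graph page and pager; these are illustrative
// assumptions, not the itemPager or DeltaPageLinker types used above.
type page struct {
	values    []string
	nextLink  string
	deltaLink string
}

type pager interface {
	getPage(context.Context) (page, error)
	setNext(string)
}

// collectPages walks nextLinks until the query is exhausted and returns the
// last deltaLink seen, mirroring the cursor semantics described above:
// the nextLink pages within one query, while the deltaLink checkpoints the
// overall data state for the next run.
func collectPages(ctx context.Context, p pager) ([]string, string, error) {
	var (
		items    []string
		deltaURL string
	)

	for {
		pg, err := p.getPage(ctx)
		if err != nil {
			return nil, "", err
		}

		items = append(items, pg.values...)

		// keep the most recent delta token; it only appears on the last page
		if len(pg.deltaLink) > 0 {
			deltaURL = pg.deltaLink
		}

		// an empty nextLink means the current query has no more pages
		if len(pg.nextLink) == 0 {
			break
		}

		p.setNext(pg.nextLink)
	}

	return items, deltaURL, nil
}
```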
diff --git a/src/pkg/services/m365/api/shared_test.go b/src/pkg/services/m365/api/shared_test.go new file mode 100644 index 000000000..6a2fd1e25 --- /dev/null +++ b/src/pkg/services/m365/api/shared_test.go @@ -0,0 +1,262 @@ +package api + +import ( + "context" + "testing" + + "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/connector/graph" + "github.com/alcionai/corso/src/internal/connector/graph/api" + "github.com/alcionai/corso/src/internal/tester" +) + +type testPagerValue struct { + id string + removed bool +} + +func (v testPagerValue) GetId() *string { return &v.id } //revive:disable-line:var-naming +func (v testPagerValue) GetAdditionalData() map[string]any { + if v.removed { + return map[string]any{graph.AddtlDataRemoved: true} + } + + return map[string]any{} +} + +type testPage struct{} + +func (p testPage) GetOdataNextLink() *string { + // no next, just one page + return ptr.To("") +} + +func (p testPage) GetOdataDeltaLink() *string { + // delta is not tested here + return ptr.To("") +} + +var _ itemPager = &testPager{} + +type testPager struct { + t *testing.T + added []string + removed []string + errorCode string + needsReset bool +} + +func (p *testPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { + if p.errorCode != "" { + ierr := odataerrors.NewMainError() + ierr.SetCode(&p.errorCode) + + err := odataerrors.NewODataError() + err.SetError(ierr) + + return nil, err + } + + return testPage{}, nil +} +func (p *testPager) setNext(string) {} +func (p *testPager) reset(context.Context) { + if !p.needsReset { + require.Fail(p.t, "reset should not be called") + } + + p.needsReset = false + p.errorCode = "" +} + +func (p *testPager) valuesIn(pl api.PageLinker) ([]getIDAndAddtler, error) { + items := []getIDAndAddtler{} + + for _, id := range p.added { + items = append(items, testPagerValue{id: id}) + } + + for _, id := range p.removed { + items = append(items, testPagerValue{id: id, removed: true}) + } + + return items, nil +} + +type SharedAPIUnitSuite struct { + tester.Suite +} + +func TestSharedAPIUnitSuite(t *testing.T) { + suite.Run(t, &SharedAPIUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *SharedAPIUnitSuite) TestGetAddedAndRemovedItemIDs() { + tests := []struct { + name string + pagerGetter func(context.Context, graph.Servicer, string, string, bool) (itemPager, error) + deltaPagerGetter func(context.Context, graph.Servicer, string, string, string, bool) (itemPager, error) + added []string + removed []string + deltaUpdate DeltaUpdate + delta string + canMakeDeltaQueries bool + }{ + { + name: "no prev delta", + pagerGetter: func( + ctx context.Context, + gs graph.Servicer, + user string, + directory string, + immutableIDs bool, + ) (itemPager, error) { + // this should not be called + return nil, assert.AnError + }, + deltaPagerGetter: func( + ctx context.Context, + gs graph.Servicer, + user string, + directory string, + delta string, + immutableIDs bool, + ) (itemPager, error) { + return &testPager{ + t: suite.T(), + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + }, nil + }, + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + deltaUpdate: DeltaUpdate{Reset: true}, + canMakeDeltaQueries: true, + }, + { + name: "with prev delta", + pagerGetter: func( + ctx context.Context, + gs 
graph.Servicer, + user string, + directory string, + immutableIDs bool, + ) (itemPager, error) { + // this should not be called + return nil, assert.AnError + }, + deltaPagerGetter: func( + ctx context.Context, + gs graph.Servicer, + user string, + directory string, + delta string, + immutableIDs bool, + ) (itemPager, error) { + return &testPager{ + t: suite.T(), + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + }, nil + }, + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + delta: "delta", + deltaUpdate: DeltaUpdate{Reset: false}, + canMakeDeltaQueries: true, + }, + { + name: "delta expired", + pagerGetter: func( + ctx context.Context, + gs graph.Servicer, + user string, + directory string, + immutableIDs bool, + ) (itemPager, error) { + // this should not be called + return nil, assert.AnError + }, + deltaPagerGetter: func( + ctx context.Context, + gs graph.Servicer, + user string, + directory string, + delta string, + immutableIDs bool, + ) (itemPager, error) { + return &testPager{ + t: suite.T(), + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + errorCode: "SyncStateNotFound", + needsReset: true, + }, nil + }, + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + delta: "delta", + deltaUpdate: DeltaUpdate{Reset: true}, + canMakeDeltaQueries: true, + }, + { + name: "quota exceeded", + pagerGetter: func( + ctx context.Context, + gs graph.Servicer, + user string, + directory string, + immutableIDs bool, + ) (itemPager, error) { + return &testPager{ + t: suite.T(), + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + }, nil + }, + deltaPagerGetter: func( + ctx context.Context, + gs graph.Servicer, + user string, + directory string, + delta string, + immutableIDs bool, + ) (itemPager, error) { + return &testPager{errorCode: "ErrorQuotaExceeded"}, nil + }, + added: []string{"uno", "dos"}, + removed: []string{"tres", "quatro"}, + deltaUpdate: DeltaUpdate{Reset: true}, + canMakeDeltaQueries: false, + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + pager, _ := tt.pagerGetter(ctx, graph.Service{}, "user", "directory", false) + deltaPager, _ := tt.deltaPagerGetter(ctx, graph.Service{}, "user", "directory", tt.delta, false) + + added, removed, deltaUpdate, err := getAddedAndRemovedItemIDs( + ctx, + graph.Service{}, + pager, + deltaPager, + tt.delta, + tt.canMakeDeltaQueries, + ) + + require.NoError(suite.T(), err, "getting added and removed item IDs") + require.EqualValues(suite.T(), tt.added, added, "added item IDs") + require.EqualValues(suite.T(), tt.removed, removed, "removed item IDs") + require.Equal(suite.T(), tt.deltaUpdate, deltaUpdate, "delta update") + }) + } +} diff --git a/src/pkg/services/m365/api/sites.go b/src/pkg/services/m365/api/sites.go index 837d836d4..3f8025289 100644 --- a/src/pkg/services/m365/api/sites.go +++ b/src/pkg/services/m365/api/sites.go @@ -47,7 +47,7 @@ func (c Sites) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Siteable, return nil, graph.Wrap(ctx, err, "getting all sites") } - iter, err := msgraphgocore.NewPageIterator( + iter, err := msgraphgocore.NewPageIterator[models.Siteable]( resp, service.Adapter(), models.CreateSiteCollectionResponseFromDiscriminatorValue) @@ -60,12 +60,12 @@ func (c Sites) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Siteable, el = errs.Local() ) - iterator := func(item any) bool { + iterator := func(item models.Siteable) bool { if 
el.Failure() != nil { return false } - s, err := validateSite(item) + err := validateSite(item) if errors.Is(err, errKnownSkippableCase) { // safe to no-op return true @@ -76,7 +76,7 @@ func (c Sites) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Siteable, return true } - us = append(us, s) + us = append(us, item) return true } @@ -109,7 +109,7 @@ func (c Sites) GetByID(ctx context.Context, identifier string) (models.Siteable, ctx = clues.Add(ctx, "given_site_id", identifier) if siteIDRE.MatchString(identifier) { - resp, err = c.stable.Client().SitesById(identifier).Get(ctx, nil) + resp, err = c.Stable.Client().Sites().BySiteId(identifier).Get(ctx, nil) if err != nil { return nil, graph.Wrap(ctx, err, "getting site by id") } @@ -136,7 +136,7 @@ func (c Sites) GetByID(ctx context.Context, identifier string) (models.Siteable, rawURL := fmt.Sprintf(webURLGetTemplate, u.Host, path) resp, err = sites. - NewItemSitesSiteItemRequestBuilder(rawURL, c.stable.Adapter()). + NewItemSitesSiteItemRequestBuilder(rawURL, c.Stable.Adapter()). Get(ctx, nil) if err != nil { return nil, graph.Wrap(ctx, err, "getting site by weburl") @@ -168,38 +168,33 @@ const personalSitePath = "sharepoint.com/personal/" // validateSite ensures the item is a Siteable, and contains the necessary // identifiers that we handle with all users. // returns the item as a Siteable model. -func validateSite(item any) (models.Siteable, error) { - m, ok := item.(models.Siteable) - if !ok { - return nil, clues.New(fmt.Sprintf("unexpected model: %T", item)) - } - - id := ptr.Val(m.GetId()) +func validateSite(item models.Siteable) error { + id := ptr.Val(item.GetId()) if len(id) == 0 { - return nil, clues.New("missing ID") + return clues.New("missing ID") } - wURL := ptr.Val(m.GetWebUrl()) + wURL := ptr.Val(item.GetWebUrl()) if len(wURL) == 0 { - return nil, clues.New("missing webURL").With("site_id", id) // TODO: pii + return clues.New("missing webURL").With("site_id", id) // TODO: pii } // personal (ie: oneDrive) sites have to be filtered out server-side. if strings.Contains(wURL, personalSitePath) { - return nil, clues.Stack(errKnownSkippableCase). + return clues.Stack(errKnownSkippableCase). With("site_id", id, "site_web_url", wURL) // TODO: pii } - name := ptr.Val(m.GetDisplayName()) + name := ptr.Val(item.GetDisplayName()) if len(name) == 0 { // the built-in site at "https://{tenant-domain}/search" never has a name. if strings.HasSuffix(wURL, "/search") { - return nil, clues.Stack(errKnownSkippableCase). + return clues.Stack(errKnownSkippableCase). 
With("site_id", id, "site_web_url", wURL) // TODO: pii } - return nil, clues.New("missing site display name").With("site_id", id) + return clues.New("missing site display name").With("site_id", id) } - return m, nil + return nil } diff --git a/src/pkg/services/m365/api/sites_test.go b/src/pkg/services/m365/api/sites_test.go index b0c713a3d..989668b85 100644 --- a/src/pkg/services/m365/api/sites_test.go +++ b/src/pkg/services/m365/api/sites_test.go @@ -33,16 +33,10 @@ func (suite *SitesUnitSuite) TestValidateSite() { tests := []struct { name string - args any - want models.Siteable + args models.Siteable errCheck assert.ErrorAssertionFunc errIsSkippable bool }{ - { - name: "Invalid type", - args: string("invalid type"), - errCheck: assert.Error, - }, { name: "No ID", args: models.NewSite(), @@ -92,7 +86,6 @@ func (suite *SitesUnitSuite) TestValidateSite() { { name: "Valid Site", args: site, - want: site, errCheck: assert.NoError, }, } @@ -100,14 +93,12 @@ func (suite *SitesUnitSuite) TestValidateSite() { suite.Run(test.name, func() { t := suite.T() - got, err := validateSite(test.args) + err := validateSite(test.args) test.errCheck(t, err, clues.ToCore(err)) if test.errIsSkippable { assert.ErrorIs(t, err, errKnownSkippableCase) } - - assert.Equal(t, test.want, got) }) } } diff --git a/src/pkg/services/m365/api/users.go b/src/pkg/services/m365/api/users.go index 9fa76421f..6bba52de5 100644 --- a/src/pkg/services/m365/api/users.go +++ b/src/pkg/services/m365/api/users.go @@ -3,6 +3,8 @@ package api import ( "context" "fmt" + "net/http" + "strings" "github.com/alcionai/clues" abstractions "github.com/microsoft/kiota-abstractions-go" @@ -10,12 +12,19 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/users" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) +// Variables +var ( + ErrMailBoxSettingsNotFound = clues.New("mailbox settings not found") +) + // --------------------------------------------------------------------------- // controller // --------------------------------------------------------------------------- @@ -34,12 +43,55 @@ type Users struct { // --------------------------------------------------------------------------- type UserInfo struct { - DiscoveredServices map[path.ServiceType]struct{} + ServicesEnabled map[path.ServiceType]struct{} + Mailbox MailboxInfo +} + +type MailboxInfo struct { + Purpose string + ArchiveFolder string + DateFormat string + TimeFormat string + DelegateMeetMsgDeliveryOpt string + Timezone string + AutomaticRepliesSetting AutomaticRepliesSettings + Language Language + WorkingHours WorkingHours + ErrGetMailBoxSetting []error + QuotaExceeded bool +} + +type AutomaticRepliesSettings struct { + ExternalAudience string + ExternalReplyMessage string + InternalReplyMessage string + ScheduledEndDateTime timeInfo + ScheduledStartDateTime timeInfo + Status string +} + +type timeInfo struct { + DateTime string + Timezone string +} + +type Language struct { + Locale string + DisplayName string +} + +type WorkingHours struct { + DaysOfWeek []string + StartTime string + EndTime string + TimeZone struct { + Name string + } } func newUserInfo() *UserInfo { return &UserInfo{ - DiscoveredServices: map[path.ServiceType]struct{}{ + ServicesEnabled: 
map[path.ServiceType]struct{}{ path.ExchangeService: {}, path.OneDriveService: {}, }, @@ -49,15 +101,21 @@ func newUserInfo() *UserInfo { // ServiceEnabled returns true if the UserInfo has an entry for the // service. If no entry exists, the service is assumed to not be enabled. func (ui *UserInfo) ServiceEnabled(service path.ServiceType) bool { - if ui == nil || len(ui.DiscoveredServices) == 0 { + if ui == nil || len(ui.ServicesEnabled) == 0 { return false } - _, ok := ui.DiscoveredServices[service] + _, ok := ui.ServicesEnabled[service] return ok } +// Returns if we can run delta queries on a mailbox. We cannot run +// them if the mailbox is full which is indicated by QuotaExceeded. +func (ui *UserInfo) CanMakeDeltaQueries() bool { + return !ui.Mailbox.QuotaExceeded +} + // --------------------------------------------------------------------------- // methods // --------------------------------------------------------------------------- @@ -113,7 +171,7 @@ func (c Users) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Userable, return nil, graph.Wrap(ctx, err, "getting all users") } - iter, err := msgraphgocore.NewPageIterator( + iter, err := msgraphgocore.NewPageIterator[models.Userable]( resp, service.Adapter(), models.CreateUserCollectionResponseFromDiscriminatorValue) @@ -126,16 +184,16 @@ func (c Users) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Userable, el = errs.Local() ) - iterator := func(item any) bool { + iterator := func(item models.Userable) bool { if el.Failure() != nil { return false } - u, err := validateUser(item) + err := validateUser(item) if err != nil { el.AddRecoverable(graph.Wrap(ctx, err, "validating user")) } else { - us = append(us, u) + us = append(us, item) } return true @@ -156,7 +214,7 @@ func (c Users) GetByID(ctx context.Context, identifier string) (models.Userable, err error ) - resp, err = c.stable.Client().UsersById(identifier).Get(ctx, nil) + resp, err = c.Stable.Client().Users().ByUserId(identifier).Get(ctx, nil) if err != nil { return nil, graph.Wrap(ctx, err, "getting user") @@ -176,56 +234,329 @@ func (c Users) GetIDAndName(ctx context.Context, userID string) (string, string, return ptr.Val(u.GetId()), ptr.Val(u.GetUserPrincipalName()), nil } +// GetAllIDsAndNames retrieves all users in the tenant and returns them in an idname.Cacher +func (c Users) GetAllIDsAndNames(ctx context.Context, errs *fault.Bus) (idname.Cacher, error) { + all, err := c.GetAll(ctx, errs) + if err != nil { + return nil, clues.Wrap(err, "getting all users") + } + + idToName := make(map[string]string, len(all)) + + for _, u := range all { + id := strings.ToLower(ptr.Val(u.GetId())) + name := strings.ToLower(ptr.Val(u.GetUserPrincipalName())) + + idToName[id] = name + } + + return idname.NewCache(idToName), nil +} + func (c Users) GetInfo(ctx context.Context, userID string) (*UserInfo, error) { // Assume all services are enabled // then filter down to only services the user has enabled - var ( - err error - userInfo = newUserInfo() + userInfo := newUserInfo() - requestParameters = &users.ItemMailFoldersRequestBuilderGetQueryParameters{ - Select: []string{"id"}, - Top: ptr.To[int32](1), // if we get any folders, then we have access. - } + requestParameters := users.ItemMailFoldersRequestBuilderGetQueryParameters{ + Select: []string{"id"}, + Top: ptr.To[int32](1), // if we get any folders, then we have access. 
+ } - options = users.ItemMailFoldersRequestBuilderGetRequestConfiguration{ - QueryParameters: requestParameters, - } - ) + options := users.ItemMailFoldersRequestBuilderGetRequestConfiguration{ + QueryParameters: &requestParameters, + } - // TODO: OneDrive - _, err = c.stable.Client().UsersById(userID).MailFolders().Get(ctx, &options) + mfs, err := c.GetMailFolders(ctx, userID, options) if err != nil { - if !graph.IsErrExchangeMailFolderNotFound(err) { - return nil, graph.Wrap(ctx, err, "getting user's mail folder") + if graph.IsErrUserNotFound(err) { + logger.CtxErr(ctx, err).Error("user not found") + return nil, graph.Stack(ctx, clues.Stack(graph.ErrResourceOwnerNotFound, err)) } - delete(userInfo.DiscoveredServices, path.ExchangeService) + if !graph.IsErrExchangeMailFolderNotFound(err) || + clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) { + logger.CtxErr(ctx, err).Error("getting user's mail folder") + return nil, err + } + + logger.Ctx(ctx).Info("resource owner does not have a mailbox enabled") + delete(userInfo.ServicesEnabled, path.ExchangeService) + } + + if _, err := c.GetDrives(ctx, userID); err != nil { + if !clues.HasLabel(err, graph.LabelsMysiteNotFound) { + logger.CtxErr(ctx, err).Error("getting user's drives") + + return nil, graph.Wrap(ctx, err, "getting user's drives") + } + + logger.Ctx(ctx).Info("resource owner does not have a drive") + + delete(userInfo.ServicesEnabled, path.OneDriveService) + } + + mbxInfo, err := c.getMailboxSettings(ctx, userID) + if err != nil { + return nil, err + } + + userInfo.Mailbox = mbxInfo + + // TODO: This tries to determine if the user has hit their mailbox + // limit by trying to fetch an item and seeing if we get the quota + // exceeded error. Ideally(if available) we should convert this to + // pull the user's usage via an api and compare if they have used + // up their quota. + if mfs != nil { + mf := mfs.GetValue()[0] // we will always have one + options := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{ + QueryParameters: &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{ + Top: ptr.To[int32](1), // just one item is enough + }, + } + _, err = c.Stable.Client(). + Users(). + ByUserId(userID). + MailFolders(). + ByMailFolderId(ptr.Val(mf.GetId())). + Messages(). + Delta(). 
+ Get(ctx, options) + + if err != nil && !graph.IsErrQuotaExceeded(err) { + return nil, err + } + + userInfo.Mailbox.QuotaExceeded = graph.IsErrQuotaExceeded(err) } return userInfo, nil } +// TODO: remove when exchange api goes into this package +func (c Users) GetMailFolders( + ctx context.Context, + userID string, + options users.ItemMailFoldersRequestBuilderGetRequestConfiguration, +) (models.MailFolderCollectionResponseable, error) { + mailFolders, err := c.Stable.Client().Users().ByUserId(userID).MailFolders().Get(ctx, &options) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting MailFolders") + } + + return mailFolders, nil +} + +// TODO: remove when drive api goes into this package +func (c Users) GetDrives(ctx context.Context, userID string) (models.DriveCollectionResponseable, error) { + drives, err := c.Stable.Client().Users().ByUserId(userID).Drives().Get(ctx, nil) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting drives") + } + + return drives, nil +} + +func (c Users) getMailboxSettings( + ctx context.Context, + userID string, +) (MailboxInfo, error) { + var ( + rawURL = fmt.Sprintf("https://graph.microsoft.com/v1.0/users/%s/mailboxSettings", userID) + adapter = c.Stable.Adapter() + mi = MailboxInfo{ + ErrGetMailBoxSetting: []error{}, + } + ) + + settings, err := users.NewUserItemRequestBuilder(rawURL, adapter).Get(ctx, nil) + if err != nil && !(graph.IsErrAccessDenied(err) || graph.IsErrExchangeMailFolderNotFound(err)) { + logger.CtxErr(ctx, err).Error("getting mailbox settings") + return mi, graph.Wrap(ctx, err, "getting additional data") + } + + if graph.IsErrAccessDenied(err) { + logger.Ctx(ctx).Info("err getting additional data: access denied") + + mi.ErrGetMailBoxSetting = append(mi.ErrGetMailBoxSetting, clues.New("access denied")) + + return mi, nil + } + + if graph.IsErrExchangeMailFolderNotFound(err) { + logger.Ctx(ctx).Info("mailfolders not found") + + mi.ErrGetMailBoxSetting = append(mi.ErrGetMailBoxSetting, ErrMailBoxSettingsNotFound) + + return mi, nil + } + + additionalData := settings.GetAdditionalData() + + mi.ArchiveFolder, err = toString(ctx, "archiveFolder", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.Timezone, err = toString(ctx, "timeZone", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.DateFormat, err = toString(ctx, "dateFormat", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.TimeFormat, err = toString(ctx, "timeFormat", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.Purpose, err = toString(ctx, "userPurpose", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.DelegateMeetMsgDeliveryOpt, err = toString(ctx, "delegateMeetingMessageDeliveryOptions", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + // decode automatic replies settings + replySetting, err := toT[map[string]any](ctx, "automaticRepliesSetting", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.Status, err = toString(ctx, "status", replySetting) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.ExternalAudience, err = toString(ctx, "externalAudience", replySetting) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.ExternalReplyMessage, err = 
toString(ctx, "externalReplyMessage", replySetting) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.InternalReplyMessage, err = toString(ctx, "internalReplyMessage", replySetting) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + // decode scheduledStartDateTime + startDateTime, err := toT[map[string]any](ctx, "scheduledStartDateTime", replySetting) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.ScheduledStartDateTime.DateTime, err = toString(ctx, "dateTime", startDateTime) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.ScheduledStartDateTime.Timezone, err = toString(ctx, "timeZone", startDateTime) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + endDateTime, err := toT[map[string]any](ctx, "scheduledEndDateTime", replySetting) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.ScheduledEndDateTime.DateTime, err = toString(ctx, "dateTime", endDateTime) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.AutomaticRepliesSetting.ScheduledEndDateTime.Timezone, err = toString(ctx, "timeZone", endDateTime) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + // Language decode + language, err := toT[map[string]any](ctx, "language", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.Language.DisplayName, err = toString(ctx, "displayName", language) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.Language.Locale, err = toString(ctx, "locale", language) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + // working hours + workingHours, err := toT[map[string]any](ctx, "workingHours", additionalData) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.WorkingHours.StartTime, err = toString(ctx, "startTime", workingHours) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.WorkingHours.EndTime, err = toString(ctx, "endTime", workingHours) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + timeZone, err := toT[map[string]any](ctx, "timeZone", workingHours) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + mi.WorkingHours.TimeZone.Name, err = toString(ctx, "name", timeZone) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + days, err := toT[[]any](ctx, "daysOfWeek", workingHours) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + + for _, day := range days { + s, err := anyToString(ctx, "dayOfTheWeek", day) + mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err) + mi.WorkingHours.DaysOfWeek = append(mi.WorkingHours.DaysOfWeek, s) + } + + return mi, nil +} + +func appendIfErr(errs []error, err error) []error { + if err == nil { + return errs + } + + return append(errs, err) +} + // --------------------------------------------------------------------------- // helpers // --------------------------------------------------------------------------- // validateUser ensures the item is a Userable, and contains the necessary // identifiers that we handle with all users. -// returns the item as a Userable model. 
-func validateUser(item any) (models.Userable, error) { - m, ok := item.(models.Userable) - if !ok { - return nil, clues.New(fmt.Sprintf("unexpected model: %T", item)) +func validateUser(item models.Userable) error { + if item.GetId() == nil { + return clues.New("missing ID") } - if m.GetId() == nil { - return nil, clues.New("missing ID") + if item.GetUserPrincipalName() == nil { + return clues.New("missing principalName") } - if m.GetUserPrincipalName() == nil { - return nil, clues.New("missing principalName") - } - - return m, nil + return nil +} + +func toString(ctx context.Context, key string, data map[string]any) (string, error) { + ctx = clues.Add(ctx, "setting_name", key) + + if len(data) == 0 { + logger.Ctx(ctx).Info("not found: ", key) + return "", ErrMailBoxSettingsNotFound + } + + return anyToString(ctx, key, data[key]) +} + +func anyToString(ctx context.Context, key string, val any) (string, error) { + if val == nil { + logger.Ctx(ctx).Info("nil value: ", key) + return "", ErrMailBoxSettingsNotFound + } + + sp, ok := val.(*string) + if !ok { + logger.Ctx(ctx).Info("value is not a *string: ", key) + return "", ErrMailBoxSettingsNotFound + } + + return ptr.Val(sp), nil +} + +func toT[T any](ctx context.Context, key string, data map[string]any) (T, error) { + ctx = clues.Add(ctx, "setting_name", key) + + if len(data) == 0 { + logger.Ctx(ctx).Info("not found: ", key) + return *new(T), ErrMailBoxSettingsNotFound + } + + val := data[key] + + if data == nil { + logger.Ctx(ctx).Info("nil value: ", key) + return *new(T), ErrMailBoxSettingsNotFound + } + + value, ok := val.(T) + if !ok { + logger.Ctx(ctx).Info(fmt.Sprintf("unexpected type for %s: %T", key, val)) + return *new(T), ErrMailBoxSettingsNotFound + } + + return value, nil } diff --git a/src/pkg/services/m365/api/users_test.go b/src/pkg/services/m365/api/users_test.go index ebbd75a34..f1c554576 100644 --- a/src/pkg/services/m365/api/users_test.go +++ b/src/pkg/services/m365/api/users_test.go @@ -30,15 +30,9 @@ func (suite *UsersUnitSuite) TestValidateUser() { tests := []struct { name string - args interface{} - want models.Userable + args models.Userable errCheck assert.ErrorAssertionFunc }{ - { - name: "Invalid type", - args: string("invalid type"), - errCheck: assert.Error, - }, { name: "No ID", args: models.NewUser(), @@ -56,7 +50,6 @@ func (suite *UsersUnitSuite) TestValidateUser() { { name: "Valid User", args: user, - want: user, errCheck: assert.NoError, }, } @@ -64,10 +57,8 @@ func (suite *UsersUnitSuite) TestValidateUser() { suite.Run(tt.name, func() { t := suite.T() - got, err := validateUser(tt.args) + err := validateUser(tt.args) tt.errCheck(t, err, clues.ToCore(err)) - - assert.Equal(t, tt.want, got) }) } } diff --git a/src/pkg/services/m365/m365.go b/src/pkg/services/m365/m365.go index 97f724f76..f4851e9ef 100644 --- a/src/pkg/services/m365/m365.go +++ b/src/pkg/services/m365/m365.go @@ -2,17 +2,15 @@ package m365 import ( "context" - "strings" "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/discovery" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -32,10 +30,15 @@ type User struct { PrincipalName string ID string Name string + Info 
api.UserInfo } -type UserInfo struct { - ServicesEnabled ServiceAccess +// UserNoInfo is the minimal information required to identify and display a user. +// TODO: Remove this once `UsersCompatNoInfo` is removed +type UserNoInfo struct { + PrincipalName string + ID string + Name string } // UsersCompat returns a list of users in the specified M365 tenant. @@ -52,6 +55,54 @@ func UsersCompat(ctx context.Context, acct account.Account) ([]*User, error) { return users, errs.Failure() } +// UsersCompatNoInfo returns a list of users in the specified M365 tenant. +// TODO: Remove this once `Info` is removed from the `User` struct and callers +// have switched over +func UsersCompatNoInfo(ctx context.Context, acct account.Account) ([]*UserNoInfo, error) { + errs := fault.New(true) + + users, err := usersNoInfo(ctx, acct, errs) + if err != nil { + return nil, err + } + + return users, errs.Failure() +} + +// usersNoInfo returns a list of users in the specified M365 tenant - with no info +// TODO: Remove this once we remove `Info` from `Users` and instead rely on the `GetUserInfo` API +// to get user information +func usersNoInfo(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*UserNoInfo, error) { + uapi, err := makeUserAPI(acct) + if err != nil { + return nil, clues.Wrap(err, "getting users").WithClues(ctx) + } + + users, err := discovery.Users(ctx, uapi, errs) + if err != nil { + return nil, err + } + + ret := make([]*UserNoInfo, 0, len(users)) + + for _, u := range users { + pu, err := parseUser(u) + if err != nil { + return nil, clues.Wrap(err, "formatting user data") + } + + puNoInfo := &UserNoInfo{ + PrincipalName: pu.PrincipalName, + ID: pu.ID, + Name: pu.Name, + } + + ret = append(ret, puNoInfo) + } + + return ret, nil +} + // Users returns a list of users in the specified M365 tenant func Users(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*User, error) { uapi, err := makeUserAPI(acct) @@ -72,6 +123,13 @@ func Users(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*User, return nil, clues.Wrap(err, "formatting user data") } + userInfo, err := discovery.GetUserInfo(ctx, acct, pu.ID, errs) + if err != nil { + return nil, clues.Wrap(err, "getting user details") + } + + pu.Info = *userInfo + ret = append(ret, pu) } @@ -94,43 +152,12 @@ func parseUser(item models.Userable) (*User, error) { return u, nil } -// UsersMap retrieves all users in the tenant, and returns two maps: one id-to-principalName, -// and one principalName-to-id. -func UsersMap( - ctx context.Context, - acct account.Account, - errs *fault.Bus, -) (common.IDsNames, error) { - users, err := Users(ctx, acct, errs) - if err != nil { - return common.IDsNames{}, err - } - - var ( - idToName = make(map[string]string, len(users)) - nameToID = make(map[string]string, len(users)) - ) - - for _, u := range users { - id, name := strings.ToLower(u.ID), strings.ToLower(u.PrincipalName) - idToName[id] = name - nameToID[name] = id - } - - ins := common.IDsNames{ - IDToName: idToName, - NameToID: nameToID, - } - - return ins, nil -} - // UserInfo returns the corso-specific set of user metadata. 
func GetUserInfo( ctx context.Context, acct account.Account, userID string, -) (*UserInfo, error) { +) (*api.UserInfo, error) { uapi, err := makeUserAPI(acct) if err != nil { return nil, clues.Wrap(err, "getting user info").WithClues(ctx) @@ -141,13 +168,7 @@ func GetUserInfo( return nil, err } - info := UserInfo{ - ServicesEnabled: ServiceAccess{ - Exchange: ui.ServiceEnabled(path.ExchangeService), - }, - } - - return &info, nil + return ui, nil } // --------------------------------------------------------------------------- @@ -207,23 +228,19 @@ func SitesMap( ctx context.Context, acct account.Account, errs *fault.Bus, -) (common.IDsNames, error) { +) (idname.Cacher, error) { sites, err := Sites(ctx, acct, errs) if err != nil { - return common.IDsNames{}, err + return idname.NewCache(nil), err } - ins := common.IDsNames{ - IDToName: make(map[string]string, len(sites)), - NameToID: make(map[string]string, len(sites)), - } + itn := make(map[string]string, len(sites)) for _, s := range sites { - ins.IDToName[s.ID] = s.WebURL - ins.NameToID[s.WebURL] = s.ID + itn[s.ID] = s.WebURL } - return ins, nil + return idname.NewCache(itn), nil } // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/m365_test.go b/src/pkg/services/m365/m365_test.go index b62b52206..46028ee3f 100644 --- a/src/pkg/services/m365/m365_test.go +++ b/src/pkg/services/m365/m365_test.go @@ -10,6 +10,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365" ) @@ -39,6 +40,31 @@ func (suite *M365IntegrationSuite) TestUsers() { assert.NoError(t, err, clues.ToCore(err)) assert.NotEmpty(t, users) + for _, u := range users { + suite.Run("user_"+u.ID, func() { + t := suite.T() + + assert.NotEmpty(t, u.ID) + assert.NotEmpty(t, u.PrincipalName) + assert.NotEmpty(t, u.Name) + assert.NotEmpty(t, u.Info) + }) + } +} + +func (suite *M365IntegrationSuite) TestUsersCompat_HasNoInfo() { + ctx, flush := tester.NewContext() + defer flush() + + var ( + t = suite.T() + acct = tester.NewM365Account(suite.T()) + ) + + users, err := m365.UsersCompatNoInfo(ctx, acct) + assert.NoError(t, err, clues.ToCore(err)) + assert.NotEmpty(t, users) + for _, u := range users { suite.Run("user_"+u.ID, func() { t := suite.T() @@ -65,13 +91,15 @@ func (suite *M365IntegrationSuite) TestGetUserInfo() { require.NotNil(t, info) require.NotEmpty(t, info) - expect := &m365.UserInfo{ - ServicesEnabled: m365.ServiceAccess{ - Exchange: true, - }, + expectEnabled := map[path.ServiceType]struct{}{ + path.ExchangeService: {}, + path.OneDriveService: {}, } - assert.Equal(t, expect, info) + assert.NotEmpty(t, info.ServicesEnabled) + assert.NotEmpty(t, info.Mailbox) + assert.Equal(t, expectEnabled, info.ServicesEnabled) + assert.Equal(t, "user", info.Mailbox.Purpose) } func (suite *M365IntegrationSuite) TestSites() { diff --git a/src/pkg/storage/storage.go b/src/pkg/storage/storage.go index e635f9981..19cc9ddc7 100644 --- a/src/pkg/storage/storage.go +++ b/src/pkg/storage/storage.go @@ -36,6 +36,11 @@ const ( type Storage struct { Provider storageProvider Config map[string]string + // TODO: These are AWS S3 specific -> move these out + SessionTags map[string]string + Role string + SessionName string + SessionDuration string } // NewStorage aggregates all the supplied configurations into a single configuration. 
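Since the Storage struct now carries AWS role-assumption fields, a short hypothetical sketch of how a caller might populate them through the NewStorageUsingRole constructor (added in the next hunk) may be useful. The ARN, session name, tags, and duration below are made-up example values, and the provider and configurer are placeholders for whatever the caller already uses:

```go
package storage

import "github.com/alcionai/corso/src/internal/common"

// storageWithRole is a hypothetical helper showing how the new role fields
// flow into a Storage value via NewStorageUsingRole.
func storageWithRole(p storageProvider, cfg common.StringConfigurer) (Storage, error) {
	return NewStorageUsingRole(
		p,
		"arn:aws:iam::123456789012:role/corso-backup", // example role ARN
		"corso-backup-session",                        // example session name
		map[string]string{"team": "it-ops"},           // example session tags
		"1h",                                          // example session duration
		cfg)
}
```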
@@ -48,6 +53,28 @@ func NewStorage(p storageProvider, cfgs ...common.StringConfigurer) (Storage, er }, err } +// NewStorageUsingRole supports specifying an AWS IAM role the storage provider +// should assume. +func NewStorageUsingRole( + p storageProvider, + roleARN string, + sessionName string, + sessionTags map[string]string, + duration string, + cfgs ...common.StringConfigurer, +) (Storage, error) { + cs, err := common.UnionStringConfigs(cfgs...) + + return Storage{ + Provider: p, + Config: cs, + Role: roleARN, + SessionTags: sessionTags, + SessionName: sessionName, + SessionDuration: duration, + }, err +} + // Helper for parsing the values in a config object. // If the value is nil or not a string, returns an empty string. func orEmptyString(v any) string { diff --git a/src/pkg/store/mock/wrapper.go b/src/pkg/store/mock/wrapper.go index 3112fbdff..1802e1053 100644 --- a/src/pkg/store/mock/wrapper.go +++ b/src/pkg/store/mock/wrapper.go @@ -20,6 +20,8 @@ func (bw BackupWrapper) GetBackup( ctx context.Context, backupID model.StableID, ) (*backup.Backup, error) { + bw.Backup.SnapshotID = bw.Backup.ID.String() + return bw.Backup, bw.GetErr } diff --git a/website/Dockerfile b/website/Dockerfile index 1cbcc8913..690b07095 100644 --- a/website/Dockerfile +++ b/website/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04 +FROM ubuntu:22.10 LABEL MAINTAINER="Niraj Tolia" ARG DEBIAN_FRONTEND=noninteractive diff --git a/website/blog/2023-04-24-backup-frequency.md b/website/blog/2023-04-24-backup-frequency.md new file mode 100644 index 000000000..602e1ffbe --- /dev/null +++ b/website/blog/2023-04-24-backup-frequency.md @@ -0,0 +1,128 @@ +--- +slug: how-often-should-you-run-microsoft-365-backups +title: "How often should you run Microsoft 365 backups?" +description: "On the ideal cadence for backups. The ideal frequency of backups should be a business-level decision - what RPO are you aiming for, any technical considerations will probably be secondary." +authors: nica +tags: [corso, microsoft 365, backups, best practices] +date: 2023-04-24 +image: ./images/astro-clock.jpg +--- + +![a closeup of the Prague Astronomical Clock By EWilson (Volunteer) - Own work, CC BY-SA 4.0, https://commons.wikimedia.org/w/index.php?curid=115416822](./images/astro-clock.jpg) + + +I was inspired by some recent conversations with [Corso users on Discord](https://discord.gg/63DTTSnuhT), and +this +[Reddit thread](https://www.reddit.com/r/Office365/comments/127rt5q/what_is_your_backup_schedule/), +to talk about the ideal cadence for backups. + +## Why do we need backups again? + +I know you’re here at the blog for Corso, a Microsoft 365 backup tool, so you +probably don’t need to be sold on the necessity of backups. But just as a +reminder, the +[Microsoft Shared Responsibility Model](https://www.veeam.com/blog/office365-shared-responsibility-model.html), +similar to that of all public cloud providers, means there’s a place where their +responsibility to help you with recovery stops. + +The most common reasons people need a backup (based on the last few months’ discussion among Microsoft 365 admins) are: + +- Malware, ransomware, or a similar attack +- Data lost in migration (for example employee leaving the org or changing roles) +- Accidental deletion + +In all of these scenarios, Microsoft will take zero responsibility for restoring your data. + +### What about the recycle bin? 
+If you’ve been pondering the same question, you’re probably already aware that
+Microsoft offers a few different recycle bin options, which can prove helpful in
+the event of short-term, limited data loss. Even though this solution can
+provide limited backup capabilities, it’s far from perfect. Data in the recycle
+bin gets automatically purged after a few days, and malicious users can also
+force early deletion of data residing in the recycle bin.
+
+Further, the recycle bin can’t provide the in-depth control over important
+business data that you need. To guarantee complete access and control of
+important data, a comprehensive backup and disaster recovery plan is required.
+This includes both short-term and long-term retention, and the ability to
+recover in bulk, granularly, or from a particular point in time.
+
+## How frequently should you back up?
+
+Let’s start by defining your team’s *Recovery Point Objective (RPO)*. RPO
+refers to how much
+[data loss](https://www.acronis.com/products/cloud/cyber-protect/data-loss-prevention/)
+a company can experience before significant harm occurs, measured from the
+point of a disruptive event back to the last data backup.
+
+RPO helps determine how much data a company can tolerate losing during an unforeseen event.
+
+The ideal frequency of backups should be a business-level decision: first work
+out what RPO you’re aiming for; any technical considerations will probably be
+secondary.
+
+### Shouldn’t you back up continuously?
+
+There have been a number of expensive backup tools in the past that offer
+something like ‘continuous backups,’ where every single file change is reflected
+in the backup almost instantly. This is a cool-sounding idea with some
+significant drawbacks, namely:
+
+- Without item versioning and/or preservation of older full backups, this model
+  drastically increases the chances that your backups will be worthless: if data
+  is accidentally corrupted, an extremely rapid backup will overwrite good
+  backed up data with junk almost right away.
+- If you want item versioning and extensive retention policies, the cost overheads for super-frequent backups can be prohibitive.
+
+While backup frequency will vary with each business, it’s generally not the case
+that a backup interval of “nearly 0ms” will make sense.
+
+## Technical Considerations: Microsoft Graph State Tokens
+
+One of the reasons to back up fairly frequently is the use of Microsoft Graph
+state tokens to track what has changed about your data. For example, Corso
+captures only incremental changes during backups, so it needs to store just the
+items that have been updated or added since the last backup. It does this using
+[state tokens](https://learn.microsoft.com/en-us/graph/delta-query-overview#state-tokens)
+that it stores within the Microsoft 365 infrastructure to checkpoint the end of
+a backup. This token is used by the next backup invocation to see what has
+changed, including deletions, within your data.
+
+The exact expiry of state tokens isn’t published by Microsoft, but our
+observations show that if you only back up every few days, these tokens can
+expire. This forces a new full backup each time, which is both unnecessary and
+costly (in terms of time and bandwidth, though not storage, thanks to Corso’s
+deduplication).
+
+You can therefore reduce data transmission overhead, improve backup performance, and reduce RPO by backing up more frequently.
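Corso manages these tokens for you, but if it helps to see the mechanics, below is a rough, self-contained Go sketch (illustrative only, not Corso's code) of a delta-token round trip against the Graph OneDrive delta endpoint. The user ID and access token are placeholders, and real code would also need throttling, paging edge cases, and proper error handling.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// deltaPage models the parts of a Graph delta response we care about here.
type deltaPage struct {
	Items     []json.RawMessage `json:"value"`
	NextLink  string            `json:"@odata.nextLink"`
	DeltaLink string            `json:"@odata.deltaLink"`
}

// enumerate walks the delta endpoint starting from url (either the initial
// delta URL or a previously saved delta link) and returns the new delta link
// to persist for the next backup run.
func enumerate(url, accessToken string) (string, error) {
	for url != "" {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return "", err
		}
		req.Header.Set("Authorization", "Bearer "+accessToken)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return "", err
		}

		var page deltaPage
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return "", err
		}

		fmt.Printf("received %d changed items\n", len(page.Items))

		if page.DeltaLink != "" {
			// Final page: this link embeds the state token. Saving it lets
			// the next run fetch only changes made after this point.
			return page.DeltaLink, nil
		}
		url = page.NextLink
	}

	return "", fmt.Errorf("enumeration ended without a delta link")
}

func main() {
	// Placeholders: supply a real user ID and access token.
	start := "https://graph.microsoft.com/v1.0/users/{user-id}/drive/root/delta"
	deltaLink, err := enumerate(start, "<access-token>")
	if err != nil {
		panic(err)
	}

	fmt.Println("persist this for the next backup:", deltaLink)
}
```

The key point is the final page's `@odata.deltaLink`: persist it, and the next run that starts from it sees only what changed, including deletions. Let that link expire and you're back to a full enumeration.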
+## Cost Considerations: Storage Costs
+
+With the threat of ransomware and other malicious data corruption, it’s a great
+idea to store full backups with some frequency. This means that, if you want to
+have frequent backups **with** retention of older versions, you’re going to
+need a lot of storage unless your backup system is smart.
+
+An intelligent tool like Corso will be cheaper than most others. First, it uses
+object storage, which is orders of magnitude cheaper than reliable block or file
+storage. Further, Corso not only deduplicates, packs, and compresses data, but
+it also has a ton of smarts to capture only the incremental changes between
+backups while always presenting them as a full backup (topic for another blog
+post!). This ensures that even if you have 1000s of backups per user or
+SharePoint site, you will always see fast restores, minimal storage overhead,
+and always-full backups.
+
+For other tools, you should evaluate whether they use storage efficiently:
+
+- Since there’s a per-object storage cost with most S3 tiers, backups should bundle small items together
+- Backups should include compression and de-duplication to be as small as possible
+
+[Take a look at some of our recent writing on selecting the best S3 storage tier](https://corsobackup.io/blog/aws-storage-class/)
+(spoiler warning: it’s probably Glacier IR) for your S3 backups.
+
+### You still haven’t answered my question: How often should you back up?
+
+Independent of whether it’s Microsoft 365 or another system, back up at least
+once a day, and ideally about once every 8 hours. That cadence keeps your
+backups efficient thanks to incremental data capture and ensures you don’t lose
+too much work in the event of an incident. Higher frequencies will be necessary
+for stricter (lower) RPO goals.
diff --git a/website/blog/2023-05-12-incrementals-pt1.md b/website/blog/2023-05-12-incrementals-pt1.md
new file mode 100644
index 000000000..10305a013
--- /dev/null
+++ b/website/blog/2023-05-12-incrementals-pt1.md
@@ -0,0 +1,158 @@
+---
+slug: incremental-backups-pt1
+title: "Speeding up Microsoft 365 backups with delta tokens"
+description: "Recent additions to Corso have reduced the duration of backups after the
+first backup by taking advantage of Microsoft’s delta query API. Doing so allows
+Corso to retrieve only the changes to the user’s data since the last backup
+instead of having to retrieve all items with the Graph API. However,
+implementing backups in this manner required us to play a few tricks with the
+Corso implementation, so we thought we’d share them here."
+authors: amartinez
+tags: [corso, microsoft 365, backups]
+date: 2023-05-12
+image: ./images/incremental-encoder.jpg
+---
+
+
+![By © Raimond Spekking / CC BY-SA 4.0 (via Wikimedia Commons), CC BY-SA 4.0, https://commons.wikimedia.org/w/index.php?curid=75914553](./images/incremental-encoder.jpg)
+
+
+Full Microsoft 365 backups can take a long time, especially since Microsoft
+throttles the number of requests an application can make in a given window of
+time. Recent additions to Corso have reduced the duration of backups after the
+first backup by taking advantage of Microsoft’s delta query API. Doing so allows
+Corso to retrieve only the changes to the user’s data since the last backup
+instead of having to retrieve all items with the Graph API. However,
+implementing backups in this manner required us to play a few tricks with the
+Corso implementation, so we thought we’d share them here.
+ + + +## Background + +Before we dive into the details of how incremental backups work, it’s useful to +have some knowledge of how delta queries work in the Microsoft Graph API and how +data is laid out in Corso backups. + +### Microsoft delta queries + +Microsoft provides a delta query API that allows developers to get only the +changes to the endpoint since the last query was made. The API represents the +idea of the “last query” with an opaque token that's returned when the set of +items is done being listed. For example, if a developer wants to get a delta +token for a specific email folder, the developer would first list all the items +in the folder using the delta endpoint. On the final page of item results from +the endpoint, the Graph API would return a token that could be used to retrieve +future updates. + +All returned tokens represent a point in time and are independent from each +other. This means that getting token a1 at time t1, making some changes, and +then getting another token a2 at time t2 would give distinct tokens. Requesting +the changes from token a1 would always give the changes made after time t1 +including those after time t2. Requesting changes from token a2 would give only +the changes made after time t2. Tokens eventually expire though, so waiting a +long time between backups (for example, a few days) may cause all items to be +enumerated again. See Nica's +[previous blog post on how backup frequency](https://corsobackup.io/blog/how-often-should-you-run-microsoft-365-backups) +can affect performance. + +## Corso full backups, incremental backups, and backup layout + +Before we get into the nuts and bolts of how Corso uses the Microsoft delta +query API, it’s important to first define what’s in a backup and the terminology +we’ll be using throughout this post. + +### Kopia + +Corso makes extensive use of [Kopia](https://github.com/kopia/kopia) to +implement our Microsoft 365 backup functionality. Kopia is a fast and secure +open-source backup/restore tool that allows you to create encrypted snapshots of +your data and save the snapshots to remote or cloud storage of your choice. + +### Backup layout + +Internally, a single Corso backup consists of three main parts: a kopia manifest +that Corso uses as the root object of the backup (BackupModel), a kopia index +for Corso, and a kopia data backup. The BackupModel contains summary information +about the status of the backup (did it have errors, how many items did it +backup, etc) and pointers to the two snapshots that contain information. + +The kopia index contains the data output during a +`corso backup details` command and is used to filter the set of restored items +during restore commands. The index contains one entry for every backed up +Microsoft 365 item in the backup. + +The data backup contains the raw bytes that Corso backed up from Microsoft 365. +Internally, Corso uses a file hierarchy in kopia that closely mirrors the layout +of the data in Microsoft 365. For example, if the user has a file in the OneDrive folder +`work/important` then Corso creates a kopia path +`/onedrive//files//root/work/important` for that +file. + +Corso also stores a few extra bits of metadata in the data snapshot to help with +incremental backups. Most importantly, it stores the Graph API’s delta tokens +retrieved during the backup process as well as a mapping relating the current +Microsoft 365 folder IDs to their paths. This information is stored with +different path prefixes (ex. 
uses `onedriveMetadata` instead of `onedrive`) to +make it straightforward to separate out from backed up item data. + +### Terminology + +*Full backups* are backups where all of the data being backed up is fetched from +Microsoft 365 with the Graph API. These backups may take a long time to complete (we’ve +seen backups of accounts with extremely large amounts of data run for 20+ hours) due to throttling imposed by Microsoft 365. +For the purposes of this blog, *incremental backups* are backups where Corso +fetches only a subset of items from Microsoft 365. Ideally Corso would fetch only the +items that change, though there may be reasons it needs to fetch more data. + +Whether Corso does a full backup or an incremental backup, the resulting Corso +backup has a listing of all items stored in Microsoft 365 (what we refer to as *indexing +information*). This means there’s no “chaining” between backups and restoring an +item from a backup requires only accessing information contained in or +referenced directly by the backup passed in to the restore command. This makes +backups independent from each other once they’ve been created, so we’ll refer to +them as *independent backups* for the rest of this post. + +Both independent backups and chained backups have the same information. Having +independent backups generally creates more complexity when making a new backup +while chained backups generally have more complexity during restore and backup +deletion. Independent backups have more complexity when creating the backup as +indexing information and item data references for deduplicated data may need to +be sourced from previous backups. Chained backups have more complex restore as +multiple backups may need to be searched for the item being restored. They also +have more complex backup deletion as an item’s data can only be deleted if no +backups in any chain refer to it. The figure below gives a high-level overview +of the differences between independent backups and chained backups. + +![an image of an independent backup](./images/independent_backups.png) +*both images below show how data would be stored if the user backed up two files on their first backup and then made a* +*new file and updated file1 before taking a second backup* +![an image of a chained backup](./images/chained_backups.png) +*both images below show how data would be stored if the user backed up two files on their first backup and then made a* +*new file and updated file1 before taking a second backup* + +Although having a full listing of all items present at the time of the backup in +each backups sounds wasteful, Corso takes advantage of the data deduplication +provided by kopia to only store one copy of the underlying bulk data in the data +snapshot for backed up items. What this really means is each Corso backup has a +complete set of *indexing information*. This gives Corso the best of both +worlds; allowing completed backups to have independent indexing information and +life cycles from each other while still minimizing the amount of item data +stored. + +Understanding how Microsoft provides information on item updates is a key part +of Corso's ability to provide fast, high-performance backups that still +accurately reflect all updates. If you have feedback, questions, or want more information, please join us on the [Corso Discord](https://discord.gg/63DTTSnuhT). 
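If a sketch helps make that trade-off concrete, here's a minimal, hypothetical Go illustration (our own toy types, not Corso's) of why restores from chained backups must walk the chain, while an independent backup repeats the index but reuses deduplicated item data.

```go
package main

import "fmt"

// IndependentBackup: every backup carries a complete index of items; bulk item
// data is deduplicated in shared storage, so repeating the index is cheap.
type IndependentBackup struct {
	ID    string
	Index map[string]string // item path -> content hash in shared storage
}

// ChainedBackup: each backup records only what changed and points at its
// parent, so restores may have to walk the chain to find unchanged items.
type ChainedBackup struct {
	ID      string
	Parent  *ChainedBackup
	Changes map[string]string // item path -> content hash (changed items only)
}

// lookup walks the chain until it finds the item, which is what makes restores
// (and safe deletion) more involved for chained backups.
func (b *ChainedBackup) lookup(path string) (string, bool) {
	for cur := b; cur != nil; cur = cur.Parent {
		if h, ok := cur.Changes[path]; ok {
			return h, true
		}
	}
	return "", false
}

func main() {
	first := &ChainedBackup{
		ID:      "backup1",
		Changes: map[string]string{"root/file1": "h1", "root/file2": "h2"},
	}
	second := &ChainedBackup{
		ID:      "backup2",
		Parent:  first,
		Changes: map[string]string{"root/file1": "h3", "root/file3": "h4"},
	}

	// Restoring file2 from backup2 requires walking back to backup1.
	h, _ := second.lookup("root/file2")
	fmt.Println("chained restore found:", h)

	// The equivalent independent backup repeats the full index but reuses the
	// same content hashes, so item data isn't stored twice.
	independent := IndependentBackup{
		ID:    "backup2",
		Index: map[string]string{"root/file1": "h3", "root/file2": "h2", "root/file3": "h4"},
	}
	fmt.Println("independent index size:", len(independent.Index))
}
```

In other words, Corso pays the extra bookkeeping at backup-creation time so that restores and deletions only ever need to touch a single backup.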
+
+> 💡 In
+> [part 2 of our series](2023-05-13-incrementals-pt2.md),
+> we’ll cover incremental backups in action, and how Corso manages state and
+> merges updates to the hierarchy.
+
+---
+
+## Try Corso Today
+
+Corso implements compression, deduplication *and* incremental backups to give
+you the best backup performance. Check
+[our quickstart guide](../../docs/quickstart/) to see how to get started.
diff --git a/website/blog/2023-05-13-incrementals-pt2.md b/website/blog/2023-05-13-incrementals-pt2.md
new file mode 100644
index 000000000..46d7840ea
--- /dev/null
+++ b/website/blog/2023-05-13-incrementals-pt2.md
@@ -0,0 +1,547 @@
+---
+slug: incremental-backups-pt2
+title: "Incremental Microsoft 365 backups in action"
+description: "In part 1 we discussed how there’s more than one way to run backups, and how full and incremental backups differ. With all the background information out of the way, it’s time to see how incremental backups actually come together in Corso. To do this, we’ll discuss things in the context of a running example."
+authors: amartinez
+tags: [corso, microsoft 365, backups]
+date: 2023-05-13
+image: ./images/incremental-scale.png
+---
+
+
+![diagram of an incremental scale encoder By Lambtron - Own work, CC BY-SA 4.0, https://commons.wikimedia.org/w/index.php?curid=81494644](./images/incremental-scale.png)
+
+
+In [Part 1](2023-05-12-incrementals-pt1.md) we discussed how there’s more than
+one way to run backups, and how full and incremental backups differ.
+
+With all the background information out of the way, it’s time to see how
+incremental backups are implemented in Corso. To do this, we’ll discuss
+things in the context of a running example.
+
+## Part 1: Starting state
+
+Let’s say that Corso has previously run a backup for a user’s OneDrive.
At the time of the backup, the drive had a layout like the one below when
+listing all items in the root folder:
+
+```JSON
+- folder1 (directory, ID: 1)
+  - super secret file1.txt (file, ID: 2)
+  - temp.log (file, ID: 3)
+- folder2 (directory, ID: 4)
+  - secret file.docx (file, ID: 5)
+- folder3 (directory, ID: 6)
+  - static file.docx (file, ID: 7)
+- folder4 (directory, ID: 8)
+  - plain file.txt (file, ID: 9)
+  - folder5 (directory, ID: 10)
+    - update.log (file, ID: 11)
+```
+
+The corresponding Corso backup would have the following items in the kopia item
+data snapshot (some prefix folders elided for brevity and file/folder names used
+directly for clarity):
+
+```JSON
+- onedrive
+  - root
+    - folder1
+      - super secret file1.txt
+      - temp.log
+    - folder2
+      - secret file.docx
+    - folder3
+      - static file.docx
+    - folder4
+      - plain file.txt
+      - folder5
+        - update.log
+- onedriveMetadata
+  - folderMap.json (provides mapping of folder IDs to paths)
+  - delta.json (provides delta tokens for endpoints)
+```
+
+Since this post is all about fetching only the changes since the last backup,
+let’s also assume that between the time of the last backup and now the user has
+done the equivalent of the following commands in their OneDrive:
+
+```bash
+rm -rf root/folder2 (directory subtree deletion)
+mkdir root/folder2 (directory creation)
+touch root/folder2/new secret file.txt (file creation)
+echo "hello world" >> root/folder1/super secret file1.txt (file update)
+rm root/folder1/temp.log (file deletion)
+mv root/folder4 root/folder1 (make directory subdirectory of another)
+```
+
+After the above commands are run, the layout in OneDrive looks like this (again,
+listing all items in the root folder):
+
+```JSON
+- folder1 (directory, ID: 1)
+  - super secret file1.txt (file, ID: 2)
+  - folder4 (directory, ID: 8)
+    - plain file.txt (file, ID: 9)
+    - folder5 (directory, ID: 10)
+      - update.log (file, ID: 11)
+- folder2 (directory, ID: 12)
+  - new secret file.txt (file, ID: 13)
+- folder3 (directory, ID: 6)
+  - static file.docx (file, ID: 7)
+```
+
+## Part 2: Finding the last backup
+
+Now, the user runs `corso backup create onedrive`. The first thing Corso will
+do is find the most recently completed OneDrive backup for the user being backed
+up (call it the *base backup* for this operation) and load the
+`folderMap.json` and `delta.json` files from it. `folderMap.json` contains the
+mapping of folder IDs to paths and will help Corso determine how the folder
+hierarchy evolved between the backups. `delta.json` contains all delta token(s)
+that the previous backup generated. These will be used to fetch only the changes
+by sending them to Microsoft’s delta endpoints.
+
+OneDrive has a single delta endpoint per drive, so the `delta.json` file
+contains only a single opaque delta token. The data in `folderMap.json` contains
+all folders that were in the previous backup, so it would look like:
+
+```JSON
+- ID1: root/folder1
+- ID4: root/folder2
+- ID6: root/folder3
+- ID8: root/folder4
+- ID10: root/folder4/folder5
+```
+
+## Part 3: Getting and organizing changes
+
+With the delta token in hand, Corso can request Microsoft send it changes since
+the last backup.
Sending the token to the Microsoft endpoint would yield results +like the following: + +```JSON +{ + { + id: 3, + deleted: true, + parentPath: "/root", + }, + { + id: 4, + deleted: true, + type: folder, + parentPath: "/root", + }, + { + id: 5, + deleted: true, + type: file, + parentPath: "/root", + }, + { + id: 1, + name: "folder1", + type: folder, + parentPath: "/root", + }, + { + id: 2, + name: "super secret file1.txt", + type: file, + parentPath: "/root/folder1", + parentID: 1 + }, + { + id: 8, + name: "folder4", + type: folder, + parentPath: "/root/folder1", + parentID: 1 + }, + { + id: 12, + name: "folder2", + type: folder, + parentPath: "/root", + }, + { + id: 13, + name: "new secret file.txt", + type: file, + parentPath: "/root/folder2", + parentID: 12, + }, +} +``` + +Some high-level things to note about the returned results are: + +- deleted OneDrive items don’t show the path they used to reside at but do show if something was a folder or a file +- a new version of “folder2” was created. It has the same name and `parentPath` as the previous version but a different ID +- both folders and files are returned in the same request +- Graph API always returns all returns ancestor folders for an item before the + item itself. Folders are returned in the order of the hierarchy ( + `[root, folder1, ...]`) +- items in a deleted folder are also returned as deleted +- `folder3` and its items isn’t returned because nothing in that subtree was + changed +- moving a folder subtree only returns a result for the root of the subtree + unless other items in the subtree were individually updated (e.x. no result + was returned for `folder5` even though it was moved) + +For every returned item, Corso checks if it’s a folder or a file and reacts accordingly. + +### Handling folders + +Folders have an in-memory representation of a Collection during a backup. This +helps Corso group items together and allows Corso to express hierarchy changes +the folder may have participated in. + +Every Collection has a current and previous path, representing where the folder +is in the hierarchy now and where the folder was in the previous backup. The +current path is sourced from the returned results; it can be generated by +appending the name of an item to the `parentPath` of the item. Folders that have +been deleted have their current path set to an empty value. + +The previous path is found by looking up the folder’s ID in the `folderMap.json` +data from the previous backup. Since that map is indexed by ID, it can still +find folders that have been deleted or folders that have been renamed. Folders +that weren't in the previous backup (created between when the last backup +completed and this backup started) have their previous path set to an empty +value. + +Together, the values of the current and previous paths allow Corso to represent +a set of changes to the hierarchy in an order-independent fashion. That means +the Collections always represent the same set of changes no matter what order +other components of Corso see the Collections in. The “state” of a Collection +can always be determined by comparing the values of the two paths. The table +below shows the different possible states. 
+
+| Previous Path | Current Path | State |
+| --- | --- | --- |
+| any value | empty | deleted folder |
+| empty | any value | new folder |
+| different from current | different from previous | folder moved |
+| same as current | same as previous | no change |
+
+To see where this representation and order-independence really come in handy,
+consider the case of `folder2`. Between the backups, the original `folder2` and
+all of its items were deleted, and a new folder with the same path was created
+with a new item. From the user’s perspective, and if potential differences in
+permissions are ignored, there’s not really a difference between the old and new
+`folder2`s. The items they have may be different, but the user generally doesn’t
+inspect the Microsoft 365 IDs of folders, so the fact that they reside at the same
+location and have the same name makes them the “same” in some sense.
+
+However, Corso shouldn’t treat the old and new folders as the same as it could
+lead to propagating old information instead of new information (e.g. consider
+folder permissions, which aren’t discussed here but which users will eventually
+be able to back up). To distinguish between the two, Corso will create two
+Collections with different states. Let’s say the first Collection created
+represents the deleted folder. It will have an empty current path and a previous
+path equal to `/root/folder2`. The second Collection will have the opposite: a
+current path of `/root/folder2` and an empty previous path. By having two
+Collections, Corso can distinguish between the two versions of `folder2` and
+take the appropriate action for each.
+
+### Handling files
+
+Every file in the results is added to the Collection representing the folder
+containing the item. Which Collection to add the item to can be discovered with
+the `parentID` field that’s set on every item in the result (root’s ID isn’t
+shown in the example). Fetching the actual item data is done later, when Corso
+actually uploads it to kopia.
+
+Since deleted and moved files don’t denote where they used to reside, every item
+is also added to a list of item names to “exclude” from the previous backup
+later on. Tracking this list ensures Corso doesn’t duplicate items or create
+*zombie items*: items that stick around in the backup even after the user
+deletes them in Microsoft 365. If the old location of the item is known, then Corso can
+just add an entry in the corresponding Collection saying the item was deleted
+(this is how Exchange works, as it has a delta endpoint per folder).
+
+### Putting it all together
+
+At the end of this part, Corso has processed all delta results and created a set
+of Collections with items. In the running example we’ve been discussing, Corso
+will create the following Collections:
In essence, kopia won't +automatically merge data from previous snapshots into the current snapshot. This +is problematic because if Corso passed only the set of changed items the Graph +API returned it would create a snapshot representing only those changes and a +lookup in the previous backup would be required to return information about +unchanged items. That would require Corso to implement chained backups instead +of independent backups. + +Corso works around this by *merging* the set of updated Collections with the +folder hierarchy in the base backup. Merging hierarchies is concerned with +getting Collections and folders in the correct locations, and assumes that items +will land in the right place as long as the containing Collection or folder +does. + +Merging hierarchies is done in two steps: first Corso builds an in-memory tree +of the non-deleted Collections created from Graph API results and then Corso +walks the base backup and adds folders from there to the in-memory tree. + +### Collections + +The first step is mostly straightforward, though Corso does keep some addition +metadata to help with deletions, moves, and renames of folders in the backup. + +The in-memory tree Corso creates has a node for every folder in the current path +for each Collection even if there’s no Collection for that folder. Creating a +consistent in-memory layout like this makes it easier to inform kopia of the +complete hierarchy when it comes to actually uploading data. Tree nodes that do +correspond to a Collection contain a reference to the Collection. For example, +the tree node for `root` won’t have a Collection reference because no Collection +was made for it. The tree node for `root/folder1` will have a Collection +reference though as earlier parts made a Collection for it. At the end of the +first step of hierarchy merging, Corso will have the following information +in-memory. + +```JSON +tree representation: +- root (no Collection) + - folder1 (Collection) + - folder4 (Collection) + - folder2 (Collection) + +extra metadata about changed paths, maps from old path +to new where "" means deleted: +{ + "/root/folder2": "", + "/root/folder4": "/root/folder1/folder4", +} +``` + +### Base backup entries + +The next step fills in the “unchanged” information by adding references to base +backup directories to the in-memory tree (I say “unchanged” because it does +include things like unmodified files that got moved when the file’s parent +folder moved). Recall that the base backup had the following layout in kopia: + +```JSON +- onedrive + - root + - folder1 + - super secret file1.txt + - temp.log + - folder2 + - secret file.docx + - folder3 + - static file.docx + - folder4 + - plain file.txt + - folder5 + - update.log +- onedriveMetadata + - folderMap.json (provides mapping of folder IDs to paths) + - delta.json (provides delta tokens for endpoints) +``` + +For the merging step, Corso is interested only in the subtree rooted at +`onedrive/root` as the metadata will be replaced completely with new metadata. +Corso traverses the base backup in a depth-first manner and, for every folder it +will determine the answer to the following questions: + +1. has the folder been explicitly renamed, moved, or deleted? +2. is the folder a descendant of a deleted folder? +3. is the folder a descendant of a folder that was renamed or moved? 
+ +These questions can be answered by getting the path of the folder in the base +backup, dynamically generating the expected path of the folder in the current +backup based on any changes that may have happened to ancestors of the folder, +and checking the metadata Corso set aside in the previous step. + +The first check handles folder renames, moves, and deletions by seeing if +there’s a metadata map entry for the folder’s base backup path. The second and +third checks use dynamically generated expected paths to implement operations +that act on subtrees of the hierarchy instead of individual folders. Moving +`folder4` to be a subfolder of `folder1` (and `folder5` to be a subsubfolder of +`folder1` ) is an example where these subtree operations are needed. + +The order Corso processes these checks is important. For example, reversing the +first and second check will result in mistakenly deleting folders that were +moved prior to deleting the parent of the folder (e.x. +`mv /root/folder4/folder5; rm -rf /root/folder4`) because Corso will see the +folder is a descendent of something that was deleted and think it should be +deleted as well. + +When augmenting the in-memory tree, folders that are marked as deleted don't +have their reference added to a tree node. Folders that were moved or renamed +have their reference added to the tree node where the folder resides in the +currently running backup. + +To give a concrete example of how this would work, let’s look at how Corso would +process `folder4` and `folder5` from the base backup. When Corso reaches +`folder4` in the base backup, it generates the base backup path `/root/folder4` +, an expected path of `/root/folder4` (no ancestors of `folder4` changed), and +checks to see if the base backup path exists in the metadata map. Corso finds +the updated path `/root/folder1/folder4` in the metadata map which represents +the new path `folder4` should reside at. Since the metadata map shows `folder4` +still exists in the currently running backup, a reference to the base backup’s +`folder4` is added to the in-memory tree node for that folder. + +Next, Corso processes the subfolders of `folder4`, in this case `folder5`. At +this point `folder5` has a base backup path of `/root/folder4/folder5` and an +expected path of `/root/folder1/folder4/folder5` . As no entry for +`/root/folder4/folder5` is found in the metadata map, the expected path is used +and a new in-memory tree node for `folder5` is created with a reference to the +base backup’s `folder5`. + +By the end of merging, the in-memory tree that Corso’s building has the layout +shown below. The additional in-memory metadata map that was created in the first +step of merging can be discarded as it’s no longer needed. + +```JSON +tree representation: +- root (no Collection, base backup directory) + - folder1 (Collection, base backup directory) + - folder4 (Collection, base backup directory) + - folder5 (base backup directory) + - folder2 (Collection) + - folder3 (base backup directory) +``` + +## Part 5: Merging items in folders and uploading data + +Once the in-memory tree of the folder hierarchy is created Corso can finally +begin uploading items to S3 with kopia. This is done by starting a kopia +snapshot with the in-memory tree as the “file system” hierarchy to backup. When +each folder in the hierarchy is read by kopia, Corso first passes kopia the set +of items in the Collection for the folder, if a Collection is present. 
Items sourced from a Collection also have their IDs tracked by Corso so it can
+exclude those items when reading from the base backup’s folder if a reference
+to one is present.
+
+Once all the items in the Collection have been given to kopia, Corso begins
+giving kopia entries from the base backup folder if there’s one associated with
+the in-memory tree node. While doing this, Corso checks each item in the base
+backup folder to see if it matches either the exclude list generated in part 3
+or an item that was streamed from the Collection. Filtering out these items has
+the effect of “deleting” that copy of them from the current backup. If these
+items weren’t filtered out, then the Corso backup would either have duplicate
+items (if the item was updated or moved between directories) or have *zombie
+items* (if the item was deleted). Items from the base backup that are actually
+included in the current backup are tracked so Corso can also retrieve their
+indexing information in the next part.
+
+Data for items is pulled from Microsoft 365 using the Graph API when kopia actually needs
+to upload the data to S3. By lazily fetching data, Corso avoids making Graph API
+requests if kopia already has a copy of that item’s data. It also reduces the
+memory footprint of Corso because only a few items’ data needs to be in memory
+at any given point in time.
+
+Going back to our running example, the folder merge operation for `folder1` would proceed as follows:
+
+```JSON
+// starting state
+global exclude items: [
+    super secret file1.txt (ID: 2),
+    temp.log (ID: 3),
+    secret file.docx (ID: 5),
+    new secret file.txt (ID: 13),
+]
+local exclude items: []
+
+// items from Collection
+super secret file1.txt (ID: 2) // uploaded, add to local exclude items
+
+// new state
+global exclude items: [
+    super secret file1.txt (ID: 2),
+    temp.log (ID: 3),
+    secret file.docx (ID: 5),
+    new secret file.txt (ID: 13),
+]
+local exclude items: [super secret file1.txt (ID: 2)]
+
+// items from base backup
+super secret file1.txt (ID: 2) // skipped because it's in local exclude items
+temp.log (ID: 3) // skipped because it's in the global exclude items
+```
+
+By the end of this part, Corso has also collected references for all items
+sourced from the base backup. The references that Corso tracks help it map from
+the old location of the item to the new one as shown below.
+
+```JSON
+items from base backup: [
+    /root/folder3/static file.docx => /root/folder3/static file.docx
+    /root/folder4/plain file.txt => /root/folder1/folder4/plain file.txt
+    /root/folder4/folder5/update.log => /root/folder1/folder4/folder5/update.log
+]
+```
+
+## Part 6: Merging indexing information and persisting the Backup model
+
+The final thing Corso needs to do is merge the set of updated indexing
+information with indexing information from the base backup. Merging the two
+allows Corso to filter backup details and restore inputs down to just the
+selected items without having to traverse multiple backups to find all the
+information.
+
+To merge indexing information, Corso first loads the indexing information from
+the base backup. Then, it compares the entries in the base backup’s index with
+the set of references collected in the previous part. Entries that match are
+updated if needed (the item’s location in OneDrive may have changed) and
+added to the new set of indexing information for the current backup.
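As a rough illustration (hypothetical types and function names, not Corso's actual code), the merge boils down to re-keying carried-over entries by their new paths and folding in entries for freshly fetched items:

```go
package main

import "fmt"

// indexEntry stands in for the per-item details Corso keeps for
// `corso backup details` output and restore filtering.
type indexEntry struct {
	Size int64
}

// mergeIndex re-keys entries carried over from the base backup to their new
// locations and folds in entries for items fetched during this backup.
func mergeIndex(
	baseIndex map[string]indexEntry, // old path -> entry, from the base backup
	carriedOver map[string]string, // old path -> new path, collected in part 5
	newEntries map[string]indexEntry, // entries for items fetched this run
) map[string]indexEntry {
	merged := make(map[string]indexEntry, len(carriedOver)+len(newEntries))

	// Items sourced from the base backup keep their details but may now live
	// at a different location in OneDrive.
	for oldPath, newPath := range carriedOver {
		if entry, ok := baseIndex[oldPath]; ok {
			merged[newPath] = entry
		}
	}

	// Items streamed from Collections in this backup are simply added.
	for path, entry := range newEntries {
		merged[path] = entry
	}

	return merged
}

func main() {
	base := map[string]indexEntry{
		"/root/folder4/plain file.txt":   {Size: 10},
		"/root/folder3/static file.docx": {Size: 20},
	}
	carried := map[string]string{
		"/root/folder4/plain file.txt":   "/root/folder1/folder4/plain file.txt",
		"/root/folder3/static file.docx": "/root/folder3/static file.docx",
	}
	fresh := map[string]indexEntry{
		"/root/folder2/new secret file.txt": {Size: 5},
	}

	fmt.Println(mergeIndex(base, carried, fresh))
}
```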
+ +Once the indexing information has been merged all data for the backup is +complete except the BackupModel that acts as the root reference for the backup. +The backup model summarizes what happened in the backup and contains references +to the index information and the item data snapshot so they can be found later. +The model itself is also stored in kopia so it’s data is encrypted as well. Once +the BackupModel is added to kopia the Corso backup is considered complete and +can be used for future restores. + +## The Journey of Incremental Backups + +Incremental backups are a key component of a high-performance backup strategy. +In this piece (and +[in part 1](2023-05-12-incrementals-pt1.md)), we've +covered how we tackled the challenge using the Microsoft Graph API. If you have +feedback, questions, or want more information, please join us on the +[Corso Discord](https://discord.gg/63DTTSnuhT). + +--- + +## Try Corso Today + +Corso implements compression, deduplication *and* incremental backups to give +you the best backup performance. Check +[our quickstart guide](../../docs/quickstart/) to see how +quickly you can get started. diff --git a/website/blog/authors.yml b/website/blog/authors.yml index e9b5149d1..ce622f533 100644 --- a/website/blog/authors.yml +++ b/website/blog/authors.yml @@ -21,3 +21,9 @@ gmatev: title: Head of Product url: https://github.com/gmatev image_url: https://github.com/gmatev.png + +amartinez: + name: Ashlie Martinez + title: Member of Technical Staff + url: https://github.com/ashmrtn + image_url: ./images/ashlie.png diff --git a/website/blog/images/ashlie.png b/website/blog/images/ashlie.png new file mode 100644 index 000000000..a80fa4c1a Binary files /dev/null and b/website/blog/images/ashlie.png differ diff --git a/website/blog/images/astro-clock.jpg b/website/blog/images/astro-clock.jpg new file mode 100644 index 000000000..642d34894 Binary files /dev/null and b/website/blog/images/astro-clock.jpg differ diff --git a/website/blog/images/chained_backups.png b/website/blog/images/chained_backups.png new file mode 100644 index 000000000..0e51e3978 Binary files /dev/null and b/website/blog/images/chained_backups.png differ diff --git a/website/blog/images/incremental-encoder.jpg b/website/blog/images/incremental-encoder.jpg new file mode 100644 index 000000000..59351a7e6 Binary files /dev/null and b/website/blog/images/incremental-encoder.jpg differ diff --git a/website/blog/images/incremental-scale.png b/website/blog/images/incremental-scale.png new file mode 100644 index 000000000..1d6825f08 Binary files /dev/null and b/website/blog/images/incremental-scale.png differ diff --git a/website/blog/images/independent_backups.png b/website/blog/images/independent_backups.png new file mode 100644 index 000000000..e9760228f Binary files /dev/null and b/website/blog/images/independent_backups.png differ diff --git a/website/docs/quickstart.md b/website/docs/quickstart.md index 15e25ebb6..906202070 100644 --- a/website/docs/quickstart.md +++ b/website/docs/quickstart.md @@ -250,7 +250,7 @@ To restore the selected email, use the following command. ```powershell # Restore a selected email - .\corso restore exchange --backup --email + .\corso restore exchange --backup --email ``` @@ -258,7 +258,7 @@ To restore the selected email, use the following command. ```bash # Restore a selected email - ./corso restore exchange --backup --email + ./corso restore exchange --backup --email ``` @@ -268,7 +268,7 @@ To restore the selected email, use the following command. 
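For example, a hypothetical invocation that writes this run's log to a fixed path (the backup command and path are placeholders, and other required flags are omitted) might look like this:

```bash
# Send the log for this run to a fixed path instead of the default
# timestamped file; repeated runs pointed at the same path append to it.
./corso backup create onedrive --log-file /var/log/corso/corso.log
```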
`# Restore a selected email docker run --env-file $HOME/.corso/corso.env \\ --volume $HOME/.corso:/app/corso ghcr.io/alcionai/corso:${Version()} \\ - restore exchange --backup --email ` + restore exchange --backup --email ` } diff --git a/website/docs/setup/configuration.md b/website/docs/setup/configuration.md index d9255f6b7..65c04e99b 100644 --- a/website/docs/setup/configuration.md +++ b/website/docs/setup/configuration.md @@ -132,7 +132,13 @@ directory within the container. Corso generates a unique log file named with its timestamp for every invocation. The default location of Corso's log file is shown below but the location can be overridden by using the `--log-file` flag. The log file will be appended to if multiple Corso invocations are pointed to the same file. + You can also use `stdout` or `stderr` as the `--log-file` location to redirect the logs to "stdout" and "stderr" respectively. +This setting can cause logs to compete with progress bar displays in the terminal. +We suggest using the `--hide-progress` option if you plan to log to stdout or stderr. + +Log entries, by default, include user names and file names. The `--mask-sensitive-data` option can be +used to replace this information with anonymized hashes. diff --git a/website/docs/setup/m365-access.md b/website/docs/setup/m365-access.md index 744ea9435..21e42f3e6 100644 --- a/website/docs/setup/m365-access.md +++ b/website/docs/setup/m365-access.md @@ -57,6 +57,7 @@ then click **Add permissions**. | Mail.ReadWrite | Application | Read and write mail in all mailboxes | | User.Read.All | Application | Read all users' full profiles | | Sites.FullControl.All | Application | Have full control of all site collections | +| MailboxSettings.Read | Application | Read all user mailbox settings | diff --git a/website/docs/support/known-issues.md b/website/docs/support/known-issues.md index 6f90e03cb..d4a508075 100644 --- a/website/docs/support/known-issues.md +++ b/website/docs/support/known-issues.md @@ -20,7 +20,7 @@ Below is a list of known Corso issues and limitations: while a backup is being created will be included in the running backup. Future backups run when the data isn't modified will include the data. -* OneDrive files ending in `.meta` or `.dirmeta` get omitted from Details and Restore commands. - * Exchange Calender Event instance exceptions (changes to a single event within a recurring series) aren't included in backup and restore. + +* SharePoint document library data can't be restored after the library has been deleted. 
diff --git a/website/package-lock.json b/website/package-lock.json index d41a0ad4c..f2d7c118f 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -8,9 +8,9 @@ "name": "docs", "version": "0.1.0", "dependencies": { - "@docusaurus/core": "2.4.0", + "@docusaurus/core": "2.4.1", "@docusaurus/plugin-google-gtag": "^2.4.0", - "@docusaurus/preset-classic": "2.4.0", + "@docusaurus/preset-classic": "2.4.1", "@loadable/component": "^5.15.3", "@mdx-js/react": "^1.6.22", "animate.css": "^4.1.1", @@ -24,33 +24,33 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.62.0", + "sass": "^1.62.1", "tiny-slider": "^2.9.4", "tw-elements": "^1.0.0-alpha13", "wow.js": "^1.2.2" }, "devDependencies": { - "@docusaurus/module-type-aliases": "2.4.0", + "@docusaurus/module-type-aliases": "2.4.1", "@iconify/react": "^4.1.0", "autoprefixer": "^10.4.14", - "postcss": "^8.4.22", - "tailwindcss": "^3.3.1" + "postcss": "^8.4.23", + "tailwindcss": "^3.3.2" } }, "node_modules/@algolia/autocomplete-core": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.7.4.tgz", - "integrity": "sha512-daoLpQ3ps/VTMRZDEBfU8ixXd+amZcNJ4QSP3IERGyzqnL5Ch8uSRFt/4G8pUvW9c3o6GA4vtVv4I4lmnkdXyg==", + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.8.2.tgz", + "integrity": "sha512-mTeshsyFhAqw/ebqNsQpMtbnjr+qVOSKXArEj4K0d7sqc8It1XD0gkASwecm9mF/jlOQ4Z9RNg1HbdA8JPdRwQ==", "dependencies": { - "@algolia/autocomplete-shared": "1.7.4" + "@algolia/autocomplete-shared": "1.8.2" } }, "node_modules/@algolia/autocomplete-preset-algolia": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.7.4.tgz", - "integrity": "sha512-s37hrvLEIfcmKY8VU9LsAXgm2yfmkdHT3DnA3SgHaY93yjZ2qL57wzb5QweVkYuEBZkT2PIREvRoLXC2sxTbpQ==", + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.8.2.tgz", + "integrity": "sha512-J0oTx4me6ZM9kIKPuL3lyU3aB8DEvpVvR6xWmHVROx5rOYJGQcZsdG4ozxwcOyiiu3qxMkIbzntnV1S1VWD8yA==", "dependencies": { - "@algolia/autocomplete-shared": "1.7.4" + "@algolia/autocomplete-shared": "1.8.2" }, "peerDependencies": { "@algolia/client-search": ">= 4.9.1 < 6", @@ -58,79 +58,79 @@ } }, "node_modules/@algolia/autocomplete-shared": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.7.4.tgz", - "integrity": "sha512-2VGCk7I9tA9Ge73Km99+Qg87w0wzW4tgUruvWAn/gfey1ZXgmxZtyIRBebk35R1O8TbK77wujVtCnpsGpRy1kg==" + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.8.2.tgz", + "integrity": "sha512-b6Z/X4MczChMcfhk6kfRmBzPgjoPzuS9KGR4AFsiLulLNRAAqhP+xZTKtMnZGhLuc61I20d5WqlId02AZvcO6g==" }, "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.16.0.tgz", - "integrity": "sha512-jVrk0YB3tjOhD5/lhBtYCVCeLjZmVpf2kdi4puApofytf/R0scjWz0GdozlW4HhU+Prxmt/c9ge4QFjtv5OAzQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.17.0.tgz", + "integrity": "sha512-myRSRZDIMYB8uCkO+lb40YKiYHi0fjpWRtJpR/dgkaiBlSD0plRyB6lLOh1XIfmMcSeBOqDE7y9m8xZMrXYfyQ==", "dependencies": { - "@algolia/cache-common": "4.16.0" + 
"@algolia/cache-common": "4.17.0" } }, "node_modules/@algolia/cache-common": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.16.0.tgz", - "integrity": "sha512-4iHjkSYQYw46pITrNQgXXhvUmcekI8INz1m+SzmqLX8jexSSy4Ky4zfGhZzhhhLHXUP3+x/PK/c0qPjxEvRwKQ==" + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.17.0.tgz", + "integrity": "sha512-g8mXzkrcUBIPZaulAuqE7xyHhLAYAcF2xSch7d9dABheybaU3U91LjBX6eJTEB7XVhEsgK4Smi27vWtAJRhIKQ==" }, "node_modules/@algolia/cache-in-memory": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.16.0.tgz", - "integrity": "sha512-p7RYykvA6Ip6QENxrh99nOD77otVh1sJRivcgcVpnjoZb5sIN3t33eUY1DpB9QSBizcrW+qk19rNkdnZ43a+PQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.17.0.tgz", + "integrity": "sha512-PT32ciC/xI8z919d0oknWVu3kMfTlhQn3MKxDln3pkn+yA7F7xrxSALysxquv+MhFfNAcrtQ/oVvQVBAQSHtdw==", "dependencies": { - "@algolia/cache-common": "4.16.0" + "@algolia/cache-common": "4.17.0" } }, "node_modules/@algolia/client-account": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.16.0.tgz", - "integrity": "sha512-eydcfpdIyuWoKgUSz5iZ/L0wE/Wl7958kACkvTHLDNXvK/b8Z1zypoJavh6/km1ZNQmFpeYS2jrmq0kUSFn02w==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.17.0.tgz", + "integrity": "sha512-sSEHx9GA6m7wrlsSMNBGfyzlIfDT2fkz2u7jqfCCd6JEEwmxt8emGmxAU/0qBfbhRSuGvzojoLJlr83BSZAKjA==", "dependencies": { - "@algolia/client-common": "4.16.0", - "@algolia/client-search": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/client-search": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "node_modules/@algolia/client-analytics": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.16.0.tgz", - "integrity": "sha512-cONWXH3BfilgdlCofUm492bJRWtpBLVW/hsUlfoFtiX1u05xoBP7qeiDwh9RR+4pSLHLodYkHAf5U4honQ55Qg==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.17.0.tgz", + "integrity": "sha512-84ooP8QA3mQ958hQ9wozk7hFUbAO+81CX1CjAuerxBqjKIInh1fOhXKTaku05O/GHBvcfExpPLIQuSuLYziBXQ==", "dependencies": { - "@algolia/client-common": "4.16.0", - "@algolia/client-search": "4.16.0", - "@algolia/requester-common": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/client-search": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "node_modules/@algolia/client-common": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.16.0.tgz", - "integrity": "sha512-QVdR4019ukBH6f5lFr27W60trRxQF1SfS1qo0IP6gjsKhXhUVJuHxOCA6ArF87jrNkeuHEoRoDU+GlvaecNo8g==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.17.0.tgz", + "integrity": "sha512-jHMks0ZFicf8nRDn6ma8DNNsdwGgP/NKiAAL9z6rS7CymJ7L0+QqTJl3rYxRW7TmBhsUH40wqzmrG6aMIN/DrQ==", "dependencies": { - "@algolia/requester-common": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "node_modules/@algolia/client-personalization": { - "version": "4.16.0", - "resolved": 
"https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.16.0.tgz", - "integrity": "sha512-irtLafssDGPuhYqIwxqOxiWlVYvrsBD+EMA1P9VJtkKi3vSNBxiWeQ0f0Tn53cUNdSRNEssfoEH84JL97SV2SQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.17.0.tgz", + "integrity": "sha512-RMzN4dZLIta1YuwT7QC9o+OeGz2cU6eTOlGNE/6RcUBLOU3l9tkCOdln5dPE2jp8GZXPl2yk54b2nSs1+pAjqw==", "dependencies": { - "@algolia/client-common": "4.16.0", - "@algolia/requester-common": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "node_modules/@algolia/client-search": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.16.0.tgz", - "integrity": "sha512-xsfrAE1jO/JDh1wFrRz+alVyW+aA6qnkzmbWWWZWEgVF3EaFqzIf9r1l/aDtDdBtNTNhX9H3Lg31+BRtd5izQA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.17.0.tgz", + "integrity": "sha512-x4P2wKrrRIXszT8gb7eWsMHNNHAJs0wE7/uqbufm4tZenAp+hwU/hq5KVsY50v+PfwM0LcDwwn/1DroujsTFoA==", "dependencies": { - "@algolia/client-common": "4.16.0", - "@algolia/requester-common": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "node_modules/@algolia/events": { @@ -139,47 +139,59 @@ "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" }, "node_modules/@algolia/logger-common": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.16.0.tgz", - "integrity": "sha512-U9H8uCzSDuePJmbnjjTX21aPDRU6x74Tdq3dJmdYu2+pISx02UeBJm4kSgc9RW5jcR5j35G9gnjHY9Q3ngWbyQ==" + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.17.0.tgz", + "integrity": "sha512-DGuoZqpTmIKJFDeyAJ7M8E/LOenIjWiOsg1XJ1OqAU/eofp49JfqXxbfgctlVZVmDABIyOz8LqEoJ6ZP4DTyvw==" }, "node_modules/@algolia/logger-console": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.16.0.tgz", - "integrity": "sha512-+qymusiM+lPZKrkf0tDjCQA158eEJO2IU+Nr/sJ9TFyI/xkFPjNPzw/Qbc8Iy/xcOXGlc6eMgmyjtVQqAWq6UA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.17.0.tgz", + "integrity": "sha512-zMPvugQV/gbXUvWBCzihw6m7oxIKp48w37QBIUu/XqQQfxhjoOE9xyfJr1KldUt5FrYOKZJVsJaEjTsu+bIgQg==", "dependencies": { - "@algolia/logger-common": "4.16.0" + "@algolia/logger-common": "4.17.0" } }, "node_modules/@algolia/requester-browser-xhr": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.16.0.tgz", - "integrity": "sha512-gK+kvs6LHl/PaOJfDuwjkopNbG1djzFLsVBklGBsSU6h6VjFkxIpo6Qq80IK14p9cplYZfhfaL12va6Q9p3KVQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.17.0.tgz", + "integrity": "sha512-aSOX/smauyTkP21Pf52pJ1O2LmNFJ5iHRIzEeTh0mwBeADO4GdG94cAWDILFA9rNblq/nK3EDh3+UyHHjplZ1A==", "dependencies": { - "@algolia/requester-common": "4.16.0" + "@algolia/requester-common": "4.17.0" } }, "node_modules/@algolia/requester-common": { - "version": "4.16.0", - "resolved": 
"https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.16.0.tgz", - "integrity": "sha512-3Zmcs/iMubcm4zqZ3vZG6Zum8t+hMWxGMzo0/uY2BD8o9q5vMxIYI0c4ocdgQjkXcix189WtZNkgjSOBzSbkdw==" + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.17.0.tgz", + "integrity": "sha512-XJjmWFEUlHu0ijvcHBoixuXfEoiRUdyzQM6YwTuB8usJNIgShua8ouFlRWF8iCeag0vZZiUm4S2WCVBPkdxFgg==" }, "node_modules/@algolia/requester-node-http": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.16.0.tgz", - "integrity": "sha512-L8JxM2VwZzh8LJ1Zb8TFS6G3icYsCKZsdWW+ahcEs1rGWmyk9SybsOe1MLnjonGBaqPWJkn9NjS7mRdjEmBtKA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.17.0.tgz", + "integrity": "sha512-bpb/wDA1aC6WxxM8v7TsFspB7yBN3nqCGs2H1OADolQR/hiAIjAxusbuMxVbRFOdaUvAIqioIIkWvZdpYNIn8w==", "dependencies": { - "@algolia/requester-common": "4.16.0" + "@algolia/requester-common": "4.17.0" } }, "node_modules/@algolia/transporter": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.16.0.tgz", - "integrity": "sha512-H9BVB2EAjT65w7XGBNf5drpsW39x2aSZ942j4boSAAJPPlLmjtj5IpAP7UAtsV8g9Beslonh0bLa1XGmE/P0BA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.17.0.tgz", + "integrity": "sha512-6xL6H6fe+Fi0AEP3ziSgC+G04RK37iRb4uUUqVAH9WPYFI8g+LYFq6iv5HS8Cbuc5TTut+Bwj6G+dh/asdb9uA==", "dependencies": { - "@algolia/cache-common": "4.16.0", - "@algolia/logger-common": "4.16.0", - "@algolia/requester-common": "4.16.0" + "@algolia/cache-common": "4.17.0", + "@algolia/logger-common": "4.17.0", + "@algolia/requester-common": "4.17.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/@ampproject/remapping": { @@ -1959,18 +1971,18 @@ } }, "node_modules/@docsearch/css": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.3.3.tgz", - "integrity": "sha512-6SCwI7P8ao+se1TUsdZ7B4XzL+gqeQZnBc+2EONZlcVa0dVrk0NjETxozFKgMv0eEGH8QzP1fkN+A1rH61l4eg==" + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.3.4.tgz", + "integrity": "sha512-vDwCDoVXDgopw/hvr0zEADew2wWaGP8Qq0Bxhgii1Ewz2t4fQeyJwIRN/mWADeLFYPVkpz8TpEbxya/i6Tm0WA==" }, "node_modules/@docsearch/react": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.3.3.tgz", - "integrity": "sha512-pLa0cxnl+G0FuIDuYlW+EBK6Rw2jwLw9B1RHIeS4N4s2VhsfJ/wzeCi3CWcs5yVfxLd5ZK50t//TMA5e79YT7Q==", + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.3.4.tgz", + "integrity": "sha512-aeOf1WC5zMzBEi2SI6WWznOmIo9rnpN4p7a3zHXxowVciqlI4HsZGtOR9nFOufLeolv7HibwLlaM0oyUqJxasw==", "dependencies": { - "@algolia/autocomplete-core": "1.7.4", - "@algolia/autocomplete-preset-algolia": "1.7.4", - "@docsearch/css": "3.3.3", + "@algolia/autocomplete-core": "1.8.2", + "@algolia/autocomplete-preset-algolia": "1.8.2", + "@docsearch/css": "3.3.4", "algoliasearch": "^4.0.0" }, "peerDependencies": { @@ -1991,9 +2003,9 @@ } }, 
"node_modules/@docusaurus/core": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.0.tgz", - "integrity": "sha512-J55/WEoIpRcLf3afO5POHPguVZosKmJEQWKBL+K7TAnfuE7i+Y0NPLlkKtnWCehagGsgTqClfQEexH/UT4kELA==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz", + "integrity": "sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g==", "dependencies": { "@babel/core": "^7.18.6", "@babel/generator": "^7.18.7", @@ -2005,13 +2017,13 @@ "@babel/runtime": "^7.18.6", "@babel/runtime-corejs3": "^7.18.6", "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", + "@docusaurus/cssnano-preset": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.2.1", "autoprefixer": "^10.4.7", @@ -2149,9 +2161,9 @@ } }, "node_modules/@docusaurus/cssnano-preset": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.0.tgz", - "integrity": "sha512-RmdiA3IpsLgZGXRzqnmTbGv43W4OD44PCo+6Q/aYjEM2V57vKCVqNzuafE94jv0z/PjHoXUrjr69SaRymBKYYw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.1.tgz", + "integrity": "sha512-ka+vqXwtcW1NbXxWsh6yA1Ckii1klY9E53cJ4O9J09nkMBgrNX3iEFED1fWdv8wf4mJjvGi5RLZ2p9hJNjsLyQ==", "dependencies": { "cssnano-preset-advanced": "^5.3.8", "postcss": "^8.4.14", @@ -2163,9 +2175,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.0.tgz", - "integrity": "sha512-T8+qR4APN+MjcC9yL2Es+xPJ2923S9hpzDmMtdsOcUGLqpCGBbU1vp3AAqDwXtVgFkq+NsEk7sHdVsfLWR/AXw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.1.tgz", + "integrity": "sha512-5h5ysIIWYIDHyTVd8BjheZmQZmEgWDR54aQ1BX9pjFfpyzFo5puKXKYrYJXbjEHGyVhEzmB9UXwbxGfaZhOjcg==", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.4.0" @@ -2239,14 +2251,14 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.0.tgz", - "integrity": "sha512-GWoH4izZKOmFoC+gbI2/y8deH/xKLvzz/T5BsEexBye8EHQlwsA7FMrVa48N063bJBH4FUOiRRXxk5rq9cC36g==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.1.tgz", + "integrity": "sha512-4KhUhEavteIAmbBj7LVFnrVYDiU51H5YWW1zY6SmBSte/YLhDutztLTBE0PQl1Grux1jzUJeaSvAzHpTn6JJDQ==", "dependencies": { "@babel/parser": "^7.18.8", "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.4.0", - "@docusaurus/utils": "2.4.0", + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", "@mdx-js/mdx": "^1.6.22", "escape-html": "^1.0.3", "file-loader": "^6.2.0", @@ -2270,12 +2282,12 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.0.tgz", - "integrity": 
"sha512-YEQO2D3UXs72qCn8Cr+RlycSQXVGN9iEUyuHwTuK4/uL/HFomB2FHSU0vSDM23oLd+X/KibQ3Ez6nGjQLqXcHg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.1.tgz", + "integrity": "sha512-gLBuIFM8Dp2XOCWffUDSjtxY7jQgKvYujt7Mx5s4FCTfoL5dN1EVbnrn+O2Wvh8b0a77D57qoIDY7ghgmatR1A==", "dependencies": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.4.0", + "@docusaurus/types": "2.4.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2289,17 +2301,17 @@ } }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.0.tgz", - "integrity": "sha512-YwkAkVUxtxoBAIj/MCb4ohN0SCtHBs4AS75jMhPpf67qf3j+U/4n33cELq7567hwyZ6fMz2GPJcVmctzlGGThQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.1.tgz", + "integrity": "sha512-E2i7Knz5YIbE1XELI6RlTnZnGgS52cUO4BlCiCUCvQHbR+s1xeIWz4C6BtaVnlug0Ccz7nFSksfwDpVlkujg5Q==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^10.1.0", @@ -2319,17 +2331,17 @@ } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.0.tgz", - "integrity": "sha512-ic/Z/ZN5Rk/RQo+Io6rUGpToOtNbtPloMR2JcGwC1xT2riMu6zzfSwmBi9tHJgdXH6CB5jG+0dOZZO8QS5tmDg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.1.tgz", + "integrity": "sha512-Lo7lSIcpswa2Kv4HEeUcGYqaasMUQNpjTXpV0N8G6jXgZaQurqp7E8NGYeGbDXnb48czmHWbzDL4S3+BbK0VzA==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/module-type-aliases": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "@types/react-router-config": "^5.0.6", "combine-promises": "^1.1.0", "fs-extra": "^10.1.0", @@ -2349,15 +2361,15 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.0.tgz", - "integrity": "sha512-Pk2pOeOxk8MeU3mrTU0XLIgP9NZixbdcJmJ7RUFrZp1Aj42nd0RhIT14BGvXXyqb8yTQlk4DmYGAzqOfBsFyGw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.1.tgz", + "integrity": "sha512-/UjuH/76KLaUlL+o1OvyORynv6FURzjurSjvn2lbWTFc4tpYY2qLYTlKpTCBVPhlLUQsfyFnshEJDLmPneq2oA==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - 
"@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "fs-extra": "^10.1.0", "tslib": "^2.4.0", "webpack": "^5.73.0" @@ -2371,13 +2383,13 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.0.tgz", - "integrity": "sha512-KC56DdYjYT7Txyux71vXHXGYZuP6yYtqwClvYpjKreWIHWus5Zt6VNi23rMZv3/QKhOCrN64zplUbdfQMvddBQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.1.tgz", + "integrity": "sha512-7Yu9UPzRShlrH/G8btOpR0e6INFZr0EegWplMjOqelIwAcx3PKyR8mgPTxGTxcqiYj6hxSCRN0D8R7YrzImwNA==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", "fs-extra": "^10.1.0", "react-json-view": "^1.21.3", "tslib": "^2.4.0" @@ -2391,13 +2403,13 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.0.tgz", - "integrity": "sha512-uGUzX67DOAIglygdNrmMOvEp8qG03X20jMWadeqVQktS6nADvozpSLGx4J0xbkblhJkUzN21WiilsP9iVP+zkw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.1.tgz", + "integrity": "sha512-dyZJdJiCoL+rcfnm0RPkLt/o732HvLiEwmtoNzOoz9MSZz117UH2J6U2vUDtzUzwtFLIf32KkeyzisbwUCgcaQ==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "tslib": "^2.4.0" }, "engines": { @@ -2409,13 +2421,13 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.0.tgz", - "integrity": "sha512-adj/70DANaQs2+TF/nRdMezDXFAV/O/pjAbUgmKBlyOTq5qoMe0Tk4muvQIwWUmiUQxFJe+sKlZGM771ownyOg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.1.tgz", + "integrity": "sha512-mKIefK+2kGTQBYvloNEKtDmnRD7bxHLsBcxgnbt4oZwzi2nxCGjPX6+9SQO2KCN5HZbNrYmGo5GJfMgoRvy6uA==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "tslib": "^2.4.0" }, "engines": { @@ -2427,13 +2439,13 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.0.tgz", - "integrity": "sha512-E66uGcYs4l7yitmp/8kMEVQftFPwV9iC62ORh47Veqzs6ExwnhzBkJmwDnwIysHBF1vlxnzET0Fl2LfL5fRR3A==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.1.tgz", + "integrity": "sha512-Zg4Ii9CMOLfpeV2nG74lVTWNtisFaH9QNtEw48R5QE1KIwDBdTVaiSA18G1EujZjrzJJzXN79VhINSbOJO/r3g==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + 
"@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "tslib": "^2.4.0" }, "engines": { @@ -2445,16 +2457,16 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.0.tgz", - "integrity": "sha512-pZxh+ygfnI657sN8a/FkYVIAmVv0CGk71QMKqJBOfMmDHNN1FeDeFkBjWP49ejBqpqAhjufkv5UWq3UOu2soCw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.1.tgz", + "integrity": "sha512-lZx+ijt/+atQ3FVE8FOHV/+X3kuok688OydDXrqKRJyXBJZKgGjA2Qa8RjQ4f27V2woaXhtnyrdPop/+OjVMRg==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "fs-extra": "^10.1.0", "sitemap": "^7.1.1", "tslib": "^2.4.0" @@ -2468,23 +2480,23 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.0.tgz", - "integrity": "sha512-/5z5o/9bc6+P5ool2y01PbJhoGddEGsC0ej1MF6mCoazk8A+kW4feoUd68l7Bnv01rCnG3xy7kHUQP97Y0grUA==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.1.tgz", + "integrity": "sha512-P4//+I4zDqQJ+UDgoFrjIFaQ1MeS9UD1cvxVQaI6O7iBmiHQm0MGROP1TbE7HlxlDPXFJjZUK3x3cAoK63smGQ==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/plugin-debug": "2.4.0", - "@docusaurus/plugin-google-analytics": "2.4.0", - "@docusaurus/plugin-google-gtag": "2.4.0", - "@docusaurus/plugin-google-tag-manager": "2.4.0", - "@docusaurus/plugin-sitemap": "2.4.0", - "@docusaurus/theme-classic": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-search-algolia": "2.4.0", - "@docusaurus/types": "2.4.0" + "@docusaurus/core": "2.4.1", + "@docusaurus/plugin-content-blog": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/plugin-content-pages": "2.4.1", + "@docusaurus/plugin-debug": "2.4.1", + "@docusaurus/plugin-google-analytics": "2.4.1", + "@docusaurus/plugin-google-gtag": "2.4.1", + "@docusaurus/plugin-google-tag-manager": "2.4.1", + "@docusaurus/plugin-sitemap": "2.4.1", + "@docusaurus/theme-classic": "2.4.1", + "@docusaurus/theme-common": "2.4.1", + "@docusaurus/theme-search-algolia": "2.4.1", + "@docusaurus/types": "2.4.1" }, "engines": { "node": ">=16.14" @@ -2508,22 +2520,22 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.0.tgz", - "integrity": "sha512-GMDX5WU6Z0OC65eQFgl3iNNEbI9IMJz9f6KnOyuMxNUR6q0qVLsKCNopFUDfFNJ55UU50o7P7o21yVhkwpfJ9w==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.1.tgz", + "integrity": "sha512-Rz0wKUa+LTW1PLXmwnf8mn85EBzaGSt6qamqtmnh9Hflkc+EqiYMhtUJeLdV+wsgYq4aG0ANc+bpUDpsUhdnwg==", "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - 
"@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-translations": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/module-type-aliases": "2.4.1", + "@docusaurus/plugin-content-blog": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/plugin-content-pages": "2.4.1", + "@docusaurus/theme-common": "2.4.1", + "@docusaurus/theme-translations": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "@mdx-js/react": "^1.6.22", "clsx": "^1.2.1", "copy-text-to-clipboard": "^3.0.1", @@ -2547,17 +2559,17 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.0.tgz", - "integrity": "sha512-IkG/l5f/FLY6cBIxtPmFnxpuPzc5TupuqlOx+XDN+035MdQcAh8wHXXZJAkTeYDeZ3anIUSUIvWa7/nRKoQEfg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.1.tgz", + "integrity": "sha512-G7Zau1W5rQTaFFB3x3soQoZpkgMbl/SYNG8PfMFIjKa3M3q8n0m/GRf5/H/e5BqOvt8c+ZWIXGCiz+kUCSHovA==", "dependencies": { - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/module-type-aliases": "2.4.1", + "@docusaurus/plugin-content-blog": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/plugin-content-pages": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2577,18 +2589,18 @@ } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.0.tgz", - "integrity": "sha512-pPCJSCL1Qt4pu/Z0uxBAuke0yEBbxh0s4fOvimna7TEcBLPq0x06/K78AaABXrTVQM6S0vdocFl9EoNgU17hqA==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.1.tgz", + "integrity": "sha512-6BcqW2lnLhZCXuMAvPRezFs1DpmEKzXFKlYjruuas+Xy3AQeFzDJKTJFIm49N77WFCTyxff8d3E4Q9pi/+5McQ==", "dependencies": { "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-translations": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/theme-common": "2.4.1", + "@docusaurus/theme-translations": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "algoliasearch": "^4.13.1", "algoliasearch-helper": "^3.10.0", "clsx": "^1.2.1", @@ -2607,9 +2619,9 @@ } }, "node_modules/@docusaurus/theme-translations": { - "version": "2.4.0", - "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.0.tgz", - "integrity": "sha512-kEoITnPXzDPUMBHk3+fzEzbopxLD3fR5sDoayNH0vXkpUukA88/aDL1bqkhxWZHA3LOfJ3f0vJbOwmnXW5v85Q==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.1.tgz", + "integrity": "sha512-T1RAGP+f86CA1kfE8ejZ3T3pUU3XcyvrGMfC/zxCtc2BsnoexuNI9Vk2CmuKCb+Tacvhxjv5unhxXce0+NKyvA==", "dependencies": { "fs-extra": "^10.1.0", "tslib": "^2.4.0" @@ -2619,9 +2631,9 @@ } }, "node_modules/@docusaurus/types": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.0.tgz", - "integrity": "sha512-xaBXr+KIPDkIaef06c+i2HeTqVNixB7yFut5fBXPGI2f1rrmEV2vLMznNGsFwvZ5XmA3Quuefd4OGRkdo97Dhw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz", + "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*", @@ -2638,11 +2650,11 @@ } }, "node_modules/@docusaurus/utils": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.0.tgz", - "integrity": "sha512-89hLYkvtRX92j+C+ERYTuSUK6nF9bGM32QThcHPg2EDDHVw6FzYQXmX6/p+pU5SDyyx5nBlE4qXR92RxCAOqfg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.1.tgz", + "integrity": "sha512-1lvEZdAQhKNht9aPXPoh69eeKnV0/62ROhQeFKKxmzd0zkcuE/Oc5Gpnt00y/f5bIsmOsYMY7Pqfm/5rteT5GA==", "dependencies": { - "@docusaurus/logger": "2.4.0", + "@docusaurus/logger": "2.4.1", "@svgr/webpack": "^6.2.1", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -2672,9 +2684,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.0.tgz", - "integrity": "sha512-zIMf10xuKxddYfLg5cS19x44zud/E9I7lj3+0bv8UIs0aahpErfNrGhijEfJpAfikhQ8tL3m35nH3hJ3sOG82A==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.1.tgz", + "integrity": "sha512-bCVGdZU+z/qVcIiEQdyx0K13OC5mYwxhSuDUR95oFbKVuXYRrTVrwZIqQljuo1fyJvFTKHiL9L9skQOPokuFNQ==", "dependencies": { "tslib": "^2.4.0" }, @@ -2691,12 +2703,12 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.0.tgz", - "integrity": "sha512-IrBsBbbAp6y7mZdJx4S4pIA7dUyWSA0GNosPk6ZJ0fX3uYIEQgcQSGIgTeSC+8xPEx3c16o03en1jSDpgQgz/w==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.1.tgz", + "integrity": "sha512-unII3hlJlDwZ3w8U+pMO3Lx3RhI4YEbY3YNsQj4yzrkZzlpqZOLuAiZK2JyULnD+TKbceKU0WyWkQXtYbLNDFA==", "dependencies": { - "@docusaurus/logger": "2.4.0", - "@docusaurus/utils": "2.4.0", + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", "joi": "^17.6.0", "js-yaml": "^4.1.0", "tslib": "^2.4.0" @@ -3903,30 +3915,30 @@ } }, "node_modules/algoliasearch": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.16.0.tgz", - "integrity": "sha512-HAjKJ6bBblaXqO4dYygF4qx251GuJ6zCZt+qbJ+kU7sOC+yc84pawEjVpJByh+cGP2APFCsao2Giz50cDlKNPA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.17.0.tgz", + "integrity": 
"sha512-JMRh2Mw6sEnVMiz6+APsi7lx9a2jiDFF+WUtANaUVCv6uSU9UOLdo5h9K3pdP6frRRybaM2fX8b1u0nqICS9aA==", "dependencies": { - "@algolia/cache-browser-local-storage": "4.16.0", - "@algolia/cache-common": "4.16.0", - "@algolia/cache-in-memory": "4.16.0", - "@algolia/client-account": "4.16.0", - "@algolia/client-analytics": "4.16.0", - "@algolia/client-common": "4.16.0", - "@algolia/client-personalization": "4.16.0", - "@algolia/client-search": "4.16.0", - "@algolia/logger-common": "4.16.0", - "@algolia/logger-console": "4.16.0", - "@algolia/requester-browser-xhr": "4.16.0", - "@algolia/requester-common": "4.16.0", - "@algolia/requester-node-http": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/cache-browser-local-storage": "4.17.0", + "@algolia/cache-common": "4.17.0", + "@algolia/cache-in-memory": "4.17.0", + "@algolia/client-account": "4.17.0", + "@algolia/client-analytics": "4.17.0", + "@algolia/client-common": "4.17.0", + "@algolia/client-personalization": "4.17.0", + "@algolia/client-search": "4.17.0", + "@algolia/logger-common": "4.17.0", + "@algolia/logger-console": "4.17.0", + "@algolia/requester-browser-xhr": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/requester-node-http": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "node_modules/algoliasearch-helper": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.12.0.tgz", - "integrity": "sha512-/j1U3PEwdan0n6P/QqSnSpNSLC5+cEMvyljd5CnmNmUjDlGrys+vFEOwjVEnqELIiAGMHEA/Nl3CiKVFBUYqyQ==", + "version": "3.13.0", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.13.0.tgz", + "integrity": "sha512-kV3c1jMQCvkARtGsSDvAwuht4PAMSsQILqPiH4WFiARoa3jXJ/r1TQoBWAjWyWF48rsNYCv7kzxgB4LTxrvvuw==", "dependencies": { "@algolia/events": "^4.0.1" }, @@ -4762,22 +4774,22 @@ } }, "node_modules/cheerio-select/node_modules/domutils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", "dependencies": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" + "domhandler": "^5.0.3" }, "funding": { "url": "https://github.com/fb55/domutils?sponsor=1" } }, "node_modules/cheerio-select/node_modules/entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", "engines": { "node": ">=0.12" }, @@ -4813,22 +4825,22 @@ } }, "node_modules/cheerio/node_modules/domutils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", "dependencies": 
{ "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" + "domhandler": "^5.0.3" }, "funding": { "url": "https://github.com/fb55/domutils?sponsor=1" } }, "node_modules/cheerio/node_modules/entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", "engines": { "node": ">=0.12" }, @@ -5391,11 +5403,11 @@ } }, "node_modules/cross-fetch": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", - "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.6.tgz", + "integrity": "sha512-riRvo06crlE8HiqOwIpQhxwdOk4fOeR7FVM/wXoxchFEqMNUjvbs3bfo4OTgMEMHzppd4DxFBDbyySj8Cv781g==", "dependencies": { - "node-fetch": "2.6.7" + "node-fetch": "^2.6.11" } }, "node_modules/cross-spawn": { @@ -8411,8 +8423,9 @@ } }, "node_modules/is-core-module": { - "version": "2.9.0", - "license": "MIT", + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.0.tgz", + "integrity": "sha512-RECHCBCd/viahWmwj6enj19sKbHfJrddi/6cBDsNTKbNq0f7VeaUkBo60BqzvPqo/W54ChS62Z5qyun7cfOMqQ==", "dependencies": { "has": "^1.0.3" }, @@ -8870,10 +8883,9 @@ } }, "node_modules/lilconfig": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.0.6.tgz", - "integrity": "sha512-9JROoBW7pobfsx+Sq2JsASvCo6Pfo6WWoUW79HuB1BCoBXD4PLWJPqDF6fNj67pqBYTbAHkE57M1kS/+L1neOg==", - "license": "MIT", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", "engines": { "node": ">=10" } @@ -9503,9 +9515,9 @@ } }, "node_modules/node-fetch": { - "version": "2.6.7", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", - "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w==", "dependencies": { "whatwg-url": "^5.0.0" }, @@ -9922,9 +9934,9 @@ } }, "node_modules/parse5/node_modules/entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", "engines": { "node": ">=0.12" }, @@ -10130,9 +10142,9 @@ } }, "node_modules/postcss": { - "version": "8.4.22", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.22.tgz", - "integrity": "sha512-XseknLAfRHzVWjCEtdviapiBtfLdgyzExD50Rg2ePaucEesyh8Wv4VPdW0nbyDa1ydbrAxV19jvMT4+LFmcNUA==", + "version": "8.4.23", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.4.23.tgz", + "integrity": "sha512-bQ3qMcpF6A/YjR55xtoTr0jGOlnPOKAIMdOWiv0EIT6HVPEaJiJB4NLljSbiHoC2RX7DN5Uvjtpbg1NPdwv1oA==", "funding": [ { "type": "opencollective", @@ -10265,9 +10277,9 @@ } }, "node_modules/postcss-import": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-14.1.0.tgz", - "integrity": "sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw==", + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", "dev": true, "dependencies": { "postcss-value-parser": "^4.0.0", @@ -10275,16 +10287,16 @@ "resolve": "^1.1.7" }, "engines": { - "node": ">=10.0.0" + "node": ">=14.0.0" }, "peerDependencies": { "postcss": "^8.0.0" } }, "node_modules/postcss-js": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.0.tgz", - "integrity": "sha512-77QESFBwgX4irogGVPgQ5s07vLvFqWr228qZY+w6lW599cRlK/HmnlivnnVUxkjHnCu4J16PDMHcH+e+2HbvTQ==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", "dependencies": { "camelcase-css": "^2.0.1" }, @@ -10296,7 +10308,7 @@ "url": "https://opencollective.com/postcss/" }, "peerDependencies": { - "postcss": "^8.3.3" + "postcss": "^8.4.21" } }, "node_modules/postcss-load-config": { @@ -10521,12 +10533,12 @@ } }, "node_modules/postcss-nested": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.0.tgz", - "integrity": "sha512-0DkamqrPcmkBDsLn+vQDIrtkSbNkv5AD/M322ySo9kqFkCIYklym2xEmWkwo+Y3/qZo34tzEPNUw4y7yMCdv5w==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", + "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", "dev": true, "dependencies": { - "postcss-selector-parser": "^6.0.10" + "postcss-selector-parser": "^6.0.11" }, "engines": { "node": ">=12.0" @@ -10746,9 +10758,9 @@ } }, "node_modules/postcss-sort-media-queries": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.3.0.tgz", - "integrity": "sha512-jAl8gJM2DvuIJiI9sL1CuiHtKM4s5aEIomkU8G3LFvbP+p8i7Sz8VV63uieTgoewGqKbi+hxBTiOKJlB35upCg==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", "dependencies": { "sort-css-media-queries": "2.1.0" }, @@ -11820,11 +11832,11 @@ "license": "MIT" }, "node_modules/resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", + "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", "dependencies": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.11.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -12023,9 
+12035,9 @@ "license": "MIT" }, "node_modules/sass": { - "version": "1.62.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.0.tgz", - "integrity": "sha512-Q4USplo4pLYgCi+XlipZCWUQz5pkg/ruSSgJ0WRDSb/+3z9tXUOkQ7QPYn4XrhZKYAK4HlpaQecRwKLJX6+DBg==", + "version": "1.62.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", + "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -12776,11 +12788,12 @@ "integrity": "sha512-GP6WDNWf+o403jrEp9c5jibKavrtLW+/qYGhFxFrG8maXhwTBI7gLLhiBb0o7uFccWN+EOS9aMO6cGHWAO07OA==" }, "node_modules/sucrase": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.31.0.tgz", - "integrity": "sha512-6QsHnkqyVEzYcaiHsOKkzOtOgdJcb8i54x6AV2hDwyZcY9ZyykGZVw6L/YN98xC0evwTP6utsWWrKRaa8QlfEQ==", + "version": "3.32.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.32.0.tgz", + "integrity": "sha512-ydQOU34rpSyj2TGyz4D2p8rbktIOZ8QY9s+DGLvFU1i5pWJE8vkpruCjGCMHsdXwnD7JDcS+noSwM/a7zyNFDQ==", "dev": true, "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", "commander": "^4.0.0", "glob": "7.1.6", "lines-and-columns": "^1.1.6", @@ -12796,6 +12809,20 @@ "node": ">=8" } }, + "node_modules/sucrase/node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/sucrase/node_modules/commander": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", @@ -12884,53 +12911,43 @@ } }, "node_modules/tailwindcss": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.1.tgz", - "integrity": "sha512-Vkiouc41d4CEq0ujXl6oiGFQ7bA3WEhUZdTgXAhtKxSy49OmKs8rEfQmupsfF0IGW8fv2iQkp1EVUuapCFrZ9g==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.2.tgz", + "integrity": "sha512-9jPkMiIBXvPc2KywkraqsUfbfj+dHDb+JPWtSJa9MLFdrPyazI7q6WX2sUrm7R9eVR7qqv3Pas7EvQFzxKnI6w==", "dev": true, "dependencies": { + "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", "chokidar": "^3.5.3", - "color-name": "^1.1.4", "didyoumean": "^1.2.2", "dlv": "^1.1.3", "fast-glob": "^3.2.12", "glob-parent": "^6.0.2", "is-glob": "^4.0.3", - "jiti": "^1.17.2", - "lilconfig": "^2.0.6", + "jiti": "^1.18.2", + "lilconfig": "^2.1.0", "micromatch": "^4.0.5", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.0.0", - "postcss": "^8.0.9", - "postcss-import": "^14.1.0", - "postcss-js": "^4.0.0", - "postcss-load-config": "^3.1.4", - "postcss-nested": "6.0.0", + "postcss": "^8.4.23", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.1", + "postcss-nested": "^6.0.1", "postcss-selector-parser": "^6.0.11", "postcss-value-parser": "^4.2.0", - "quick-lru": "^5.1.1", - "resolve": "^1.22.1", - "sucrase": "^3.29.0" + "resolve": "^1.22.2", + "sucrase": "^3.32.0" }, "bin": { "tailwind": "lib/cli.js", "tailwindcss": "lib/cli.js" }, "engines": { - "node": ">=12.13.0" - }, - "peerDependencies": { - "postcss": "^8.0.9" + "node": ">=14.0.0" } }, - 
"node_modules/tailwindcss/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, "node_modules/tailwindcss/node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -12943,6 +12960,44 @@ "node": ">=10.13.0" } }, + "node_modules/tailwindcss/node_modules/postcss-load-config": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.1.tgz", + "integrity": "sha512-vEJIc8RdiBRu3oRAI0ymerOn+7rPuMvRXslTvZUKZonDHFIczxztIyJ1urxM1x9JXEikvpWWTUUqal5j/8QgvA==", + "dev": true, + "dependencies": { + "lilconfig": "^2.0.5", + "yaml": "^2.1.1" + }, + "engines": { + "node": ">= 14" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/tailwindcss/node_modules/yaml": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.2.2.tgz", + "integrity": "sha512-CBKFWExMn46Foo4cldiChEzn7S7SRV+wqiluAb6xmueD/fGyRHIhX8m14vVGgeFWjN540nKCNVj6P21eQjgTuA==", + "dev": true, + "engines": { + "node": ">= 14" + } + }, "node_modules/tapable": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", @@ -13319,9 +13374,9 @@ } }, "node_modules/ua-parser-js": { - "version": "0.7.34", - "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.34.tgz", - "integrity": "sha512-cJMeh/eOILyGu0ejgTKB95yKT3zOenSe9UGE3vj6WfiOwgGYnmATUsnDixMFvdU+rNMvWih83hrUP8VwhF9yXQ==", + "version": "0.7.35", + "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.35.tgz", + "integrity": "sha512-veRf7dawaj9xaWEu9HoTVn5Pggtc/qj+kqTOFvNiN1l0YdxwC1kvel57UCjThjGa3BHBihE8/UJAHI+uQHmd/g==", "funding": [ { "type": "opencollective", @@ -14803,95 +14858,95 @@ }, "dependencies": { "@algolia/autocomplete-core": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.7.4.tgz", - "integrity": "sha512-daoLpQ3ps/VTMRZDEBfU8ixXd+amZcNJ4QSP3IERGyzqnL5Ch8uSRFt/4G8pUvW9c3o6GA4vtVv4I4lmnkdXyg==", + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.8.2.tgz", + "integrity": "sha512-mTeshsyFhAqw/ebqNsQpMtbnjr+qVOSKXArEj4K0d7sqc8It1XD0gkASwecm9mF/jlOQ4Z9RNg1HbdA8JPdRwQ==", "requires": { - "@algolia/autocomplete-shared": "1.7.4" + "@algolia/autocomplete-shared": "1.8.2" } }, "@algolia/autocomplete-preset-algolia": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.7.4.tgz", - "integrity": "sha512-s37hrvLEIfcmKY8VU9LsAXgm2yfmkdHT3DnA3SgHaY93yjZ2qL57wzb5QweVkYuEBZkT2PIREvRoLXC2sxTbpQ==", + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.8.2.tgz", + "integrity": "sha512-J0oTx4me6ZM9kIKPuL3lyU3aB8DEvpVvR6xWmHVROx5rOYJGQcZsdG4ozxwcOyiiu3qxMkIbzntnV1S1VWD8yA==", "requires": { - "@algolia/autocomplete-shared": "1.7.4" + "@algolia/autocomplete-shared": "1.8.2" } }, "@algolia/autocomplete-shared": { - "version": "1.7.4", - "resolved": 
"https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.7.4.tgz", - "integrity": "sha512-2VGCk7I9tA9Ge73Km99+Qg87w0wzW4tgUruvWAn/gfey1ZXgmxZtyIRBebk35R1O8TbK77wujVtCnpsGpRy1kg==" + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.8.2.tgz", + "integrity": "sha512-b6Z/X4MczChMcfhk6kfRmBzPgjoPzuS9KGR4AFsiLulLNRAAqhP+xZTKtMnZGhLuc61I20d5WqlId02AZvcO6g==" }, "@algolia/cache-browser-local-storage": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.16.0.tgz", - "integrity": "sha512-jVrk0YB3tjOhD5/lhBtYCVCeLjZmVpf2kdi4puApofytf/R0scjWz0GdozlW4HhU+Prxmt/c9ge4QFjtv5OAzQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.17.0.tgz", + "integrity": "sha512-myRSRZDIMYB8uCkO+lb40YKiYHi0fjpWRtJpR/dgkaiBlSD0plRyB6lLOh1XIfmMcSeBOqDE7y9m8xZMrXYfyQ==", "requires": { - "@algolia/cache-common": "4.16.0" + "@algolia/cache-common": "4.17.0" } }, "@algolia/cache-common": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.16.0.tgz", - "integrity": "sha512-4iHjkSYQYw46pITrNQgXXhvUmcekI8INz1m+SzmqLX8jexSSy4Ky4zfGhZzhhhLHXUP3+x/PK/c0qPjxEvRwKQ==" + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.17.0.tgz", + "integrity": "sha512-g8mXzkrcUBIPZaulAuqE7xyHhLAYAcF2xSch7d9dABheybaU3U91LjBX6eJTEB7XVhEsgK4Smi27vWtAJRhIKQ==" }, "@algolia/cache-in-memory": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.16.0.tgz", - "integrity": "sha512-p7RYykvA6Ip6QENxrh99nOD77otVh1sJRivcgcVpnjoZb5sIN3t33eUY1DpB9QSBizcrW+qk19rNkdnZ43a+PQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.17.0.tgz", + "integrity": "sha512-PT32ciC/xI8z919d0oknWVu3kMfTlhQn3MKxDln3pkn+yA7F7xrxSALysxquv+MhFfNAcrtQ/oVvQVBAQSHtdw==", "requires": { - "@algolia/cache-common": "4.16.0" + "@algolia/cache-common": "4.17.0" } }, "@algolia/client-account": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.16.0.tgz", - "integrity": "sha512-eydcfpdIyuWoKgUSz5iZ/L0wE/Wl7958kACkvTHLDNXvK/b8Z1zypoJavh6/km1ZNQmFpeYS2jrmq0kUSFn02w==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.17.0.tgz", + "integrity": "sha512-sSEHx9GA6m7wrlsSMNBGfyzlIfDT2fkz2u7jqfCCd6JEEwmxt8emGmxAU/0qBfbhRSuGvzojoLJlr83BSZAKjA==", "requires": { - "@algolia/client-common": "4.16.0", - "@algolia/client-search": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/client-search": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "@algolia/client-analytics": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.16.0.tgz", - "integrity": "sha512-cONWXH3BfilgdlCofUm492bJRWtpBLVW/hsUlfoFtiX1u05xoBP7qeiDwh9RR+4pSLHLodYkHAf5U4honQ55Qg==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.17.0.tgz", + "integrity": "sha512-84ooP8QA3mQ958hQ9wozk7hFUbAO+81CX1CjAuerxBqjKIInh1fOhXKTaku05O/GHBvcfExpPLIQuSuLYziBXQ==", "requires": { - "@algolia/client-common": "4.16.0", - "@algolia/client-search": "4.16.0", - "@algolia/requester-common": "4.16.0", - 
"@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/client-search": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "@algolia/client-common": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.16.0.tgz", - "integrity": "sha512-QVdR4019ukBH6f5lFr27W60trRxQF1SfS1qo0IP6gjsKhXhUVJuHxOCA6ArF87jrNkeuHEoRoDU+GlvaecNo8g==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.17.0.tgz", + "integrity": "sha512-jHMks0ZFicf8nRDn6ma8DNNsdwGgP/NKiAAL9z6rS7CymJ7L0+QqTJl3rYxRW7TmBhsUH40wqzmrG6aMIN/DrQ==", "requires": { - "@algolia/requester-common": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "@algolia/client-personalization": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.16.0.tgz", - "integrity": "sha512-irtLafssDGPuhYqIwxqOxiWlVYvrsBD+EMA1P9VJtkKi3vSNBxiWeQ0f0Tn53cUNdSRNEssfoEH84JL97SV2SQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.17.0.tgz", + "integrity": "sha512-RMzN4dZLIta1YuwT7QC9o+OeGz2cU6eTOlGNE/6RcUBLOU3l9tkCOdln5dPE2jp8GZXPl2yk54b2nSs1+pAjqw==", "requires": { - "@algolia/client-common": "4.16.0", - "@algolia/requester-common": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "@algolia/client-search": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.16.0.tgz", - "integrity": "sha512-xsfrAE1jO/JDh1wFrRz+alVyW+aA6qnkzmbWWWZWEgVF3EaFqzIf9r1l/aDtDdBtNTNhX9H3Lg31+BRtd5izQA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.17.0.tgz", + "integrity": "sha512-x4P2wKrrRIXszT8gb7eWsMHNNHAJs0wE7/uqbufm4tZenAp+hwU/hq5KVsY50v+PfwM0LcDwwn/1DroujsTFoA==", "requires": { - "@algolia/client-common": "4.16.0", - "@algolia/requester-common": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/client-common": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "@algolia/events": { @@ -14900,49 +14955,55 @@ "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" }, "@algolia/logger-common": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.16.0.tgz", - "integrity": "sha512-U9H8uCzSDuePJmbnjjTX21aPDRU6x74Tdq3dJmdYu2+pISx02UeBJm4kSgc9RW5jcR5j35G9gnjHY9Q3ngWbyQ==" + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.17.0.tgz", + "integrity": "sha512-DGuoZqpTmIKJFDeyAJ7M8E/LOenIjWiOsg1XJ1OqAU/eofp49JfqXxbfgctlVZVmDABIyOz8LqEoJ6ZP4DTyvw==" }, "@algolia/logger-console": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.16.0.tgz", - "integrity": "sha512-+qymusiM+lPZKrkf0tDjCQA158eEJO2IU+Nr/sJ9TFyI/xkFPjNPzw/Qbc8Iy/xcOXGlc6eMgmyjtVQqAWq6UA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.17.0.tgz", + "integrity": "sha512-zMPvugQV/gbXUvWBCzihw6m7oxIKp48w37QBIUu/XqQQfxhjoOE9xyfJr1KldUt5FrYOKZJVsJaEjTsu+bIgQg==", "requires": { - 
"@algolia/logger-common": "4.16.0" + "@algolia/logger-common": "4.17.0" } }, "@algolia/requester-browser-xhr": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.16.0.tgz", - "integrity": "sha512-gK+kvs6LHl/PaOJfDuwjkopNbG1djzFLsVBklGBsSU6h6VjFkxIpo6Qq80IK14p9cplYZfhfaL12va6Q9p3KVQ==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.17.0.tgz", + "integrity": "sha512-aSOX/smauyTkP21Pf52pJ1O2LmNFJ5iHRIzEeTh0mwBeADO4GdG94cAWDILFA9rNblq/nK3EDh3+UyHHjplZ1A==", "requires": { - "@algolia/requester-common": "4.16.0" + "@algolia/requester-common": "4.17.0" } }, "@algolia/requester-common": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.16.0.tgz", - "integrity": "sha512-3Zmcs/iMubcm4zqZ3vZG6Zum8t+hMWxGMzo0/uY2BD8o9q5vMxIYI0c4ocdgQjkXcix189WtZNkgjSOBzSbkdw==" + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.17.0.tgz", + "integrity": "sha512-XJjmWFEUlHu0ijvcHBoixuXfEoiRUdyzQM6YwTuB8usJNIgShua8ouFlRWF8iCeag0vZZiUm4S2WCVBPkdxFgg==" }, "@algolia/requester-node-http": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.16.0.tgz", - "integrity": "sha512-L8JxM2VwZzh8LJ1Zb8TFS6G3icYsCKZsdWW+ahcEs1rGWmyk9SybsOe1MLnjonGBaqPWJkn9NjS7mRdjEmBtKA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.17.0.tgz", + "integrity": "sha512-bpb/wDA1aC6WxxM8v7TsFspB7yBN3nqCGs2H1OADolQR/hiAIjAxusbuMxVbRFOdaUvAIqioIIkWvZdpYNIn8w==", "requires": { - "@algolia/requester-common": "4.16.0" + "@algolia/requester-common": "4.17.0" } }, "@algolia/transporter": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.16.0.tgz", - "integrity": "sha512-H9BVB2EAjT65w7XGBNf5drpsW39x2aSZ942j4boSAAJPPlLmjtj5IpAP7UAtsV8g9Beslonh0bLa1XGmE/P0BA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.17.0.tgz", + "integrity": "sha512-6xL6H6fe+Fi0AEP3ziSgC+G04RK37iRb4uUUqVAH9WPYFI8g+LYFq6iv5HS8Cbuc5TTut+Bwj6G+dh/asdb9uA==", "requires": { - "@algolia/cache-common": "4.16.0", - "@algolia/logger-common": "4.16.0", - "@algolia/requester-common": "4.16.0" + "@algolia/cache-common": "4.17.0", + "@algolia/logger-common": "4.17.0", + "@algolia/requester-common": "4.17.0" } }, + "@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true + }, "@ampproject/remapping": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.0.tgz", @@ -16035,25 +16096,25 @@ "optional": true }, "@docsearch/css": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.3.3.tgz", - "integrity": "sha512-6SCwI7P8ao+se1TUsdZ7B4XzL+gqeQZnBc+2EONZlcVa0dVrk0NjETxozFKgMv0eEGH8QzP1fkN+A1rH61l4eg==" + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.3.4.tgz", + "integrity": "sha512-vDwCDoVXDgopw/hvr0zEADew2wWaGP8Qq0Bxhgii1Ewz2t4fQeyJwIRN/mWADeLFYPVkpz8TpEbxya/i6Tm0WA==" }, "@docsearch/react": { - "version": "3.3.3", - "resolved": 
"https://registry.npmjs.org/@docsearch/react/-/react-3.3.3.tgz", - "integrity": "sha512-pLa0cxnl+G0FuIDuYlW+EBK6Rw2jwLw9B1RHIeS4N4s2VhsfJ/wzeCi3CWcs5yVfxLd5ZK50t//TMA5e79YT7Q==", + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.3.4.tgz", + "integrity": "sha512-aeOf1WC5zMzBEi2SI6WWznOmIo9rnpN4p7a3zHXxowVciqlI4HsZGtOR9nFOufLeolv7HibwLlaM0oyUqJxasw==", "requires": { - "@algolia/autocomplete-core": "1.7.4", - "@algolia/autocomplete-preset-algolia": "1.7.4", - "@docsearch/css": "3.3.3", + "@algolia/autocomplete-core": "1.8.2", + "@algolia/autocomplete-preset-algolia": "1.8.2", + "@docsearch/css": "3.3.4", "algoliasearch": "^4.0.0" } }, "@docusaurus/core": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.0.tgz", - "integrity": "sha512-J55/WEoIpRcLf3afO5POHPguVZosKmJEQWKBL+K7TAnfuE7i+Y0NPLlkKtnWCehagGsgTqClfQEexH/UT4kELA==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz", + "integrity": "sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g==", "requires": { "@babel/core": "^7.18.6", "@babel/generator": "^7.18.7", @@ -16065,13 +16126,13 @@ "@babel/runtime": "^7.18.6", "@babel/runtime-corejs3": "^7.18.6", "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", + "@docusaurus/cssnano-preset": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.2.1", "autoprefixer": "^10.4.7", @@ -16174,9 +16235,9 @@ } }, "@docusaurus/cssnano-preset": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.0.tgz", - "integrity": "sha512-RmdiA3IpsLgZGXRzqnmTbGv43W4OD44PCo+6Q/aYjEM2V57vKCVqNzuafE94jv0z/PjHoXUrjr69SaRymBKYYw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.1.tgz", + "integrity": "sha512-ka+vqXwtcW1NbXxWsh6yA1Ckii1klY9E53cJ4O9J09nkMBgrNX3iEFED1fWdv8wf4mJjvGi5RLZ2p9hJNjsLyQ==", "requires": { "cssnano-preset-advanced": "^5.3.8", "postcss": "^8.4.14", @@ -16185,9 +16246,9 @@ } }, "@docusaurus/logger": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.0.tgz", - "integrity": "sha512-T8+qR4APN+MjcC9yL2Es+xPJ2923S9hpzDmMtdsOcUGLqpCGBbU1vp3AAqDwXtVgFkq+NsEk7sHdVsfLWR/AXw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.1.tgz", + "integrity": "sha512-5h5ysIIWYIDHyTVd8BjheZmQZmEgWDR54aQ1BX9pjFfpyzFo5puKXKYrYJXbjEHGyVhEzmB9UXwbxGfaZhOjcg==", "requires": { "chalk": "^4.1.2", "tslib": "^2.4.0" @@ -16239,14 +16300,14 @@ } }, "@docusaurus/mdx-loader": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.0.tgz", - "integrity": "sha512-GWoH4izZKOmFoC+gbI2/y8deH/xKLvzz/T5BsEexBye8EHQlwsA7FMrVa48N063bJBH4FUOiRRXxk5rq9cC36g==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.1.tgz", + "integrity": 
"sha512-4KhUhEavteIAmbBj7LVFnrVYDiU51H5YWW1zY6SmBSte/YLhDutztLTBE0PQl1Grux1jzUJeaSvAzHpTn6JJDQ==", "requires": { "@babel/parser": "^7.18.8", "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.4.0", - "@docusaurus/utils": "2.4.0", + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", "@mdx-js/mdx": "^1.6.22", "escape-html": "^1.0.3", "file-loader": "^6.2.0", @@ -16263,12 +16324,12 @@ } }, "@docusaurus/module-type-aliases": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.0.tgz", - "integrity": "sha512-YEQO2D3UXs72qCn8Cr+RlycSQXVGN9iEUyuHwTuK4/uL/HFomB2FHSU0vSDM23oLd+X/KibQ3Ez6nGjQLqXcHg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.1.tgz", + "integrity": "sha512-gLBuIFM8Dp2XOCWffUDSjtxY7jQgKvYujt7Mx5s4FCTfoL5dN1EVbnrn+O2Wvh8b0a77D57qoIDY7ghgmatR1A==", "requires": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.4.0", + "@docusaurus/types": "2.4.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -16278,17 +16339,17 @@ } }, "@docusaurus/plugin-content-blog": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.0.tgz", - "integrity": "sha512-YwkAkVUxtxoBAIj/MCb4ohN0SCtHBs4AS75jMhPpf67qf3j+U/4n33cELq7567hwyZ6fMz2GPJcVmctzlGGThQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.1.tgz", + "integrity": "sha512-E2i7Knz5YIbE1XELI6RlTnZnGgS52cUO4BlCiCUCvQHbR+s1xeIWz4C6BtaVnlug0Ccz7nFSksfwDpVlkujg5Q==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^10.1.0", @@ -16301,17 +16362,17 @@ } }, "@docusaurus/plugin-content-docs": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.0.tgz", - "integrity": "sha512-ic/Z/ZN5Rk/RQo+Io6rUGpToOtNbtPloMR2JcGwC1xT2riMu6zzfSwmBi9tHJgdXH6CB5jG+0dOZZO8QS5tmDg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.1.tgz", + "integrity": "sha512-Lo7lSIcpswa2Kv4HEeUcGYqaasMUQNpjTXpV0N8G6jXgZaQurqp7E8NGYeGbDXnb48czmHWbzDL4S3+BbK0VzA==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/module-type-aliases": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "@types/react-router-config": "^5.0.6", "combine-promises": "^1.1.0", "fs-extra": "^10.1.0", @@ -16324,100 +16385,100 @@ } }, "@docusaurus/plugin-content-pages": { - "version": "2.4.0", - "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.0.tgz", - "integrity": "sha512-Pk2pOeOxk8MeU3mrTU0XLIgP9NZixbdcJmJ7RUFrZp1Aj42nd0RhIT14BGvXXyqb8yTQlk4DmYGAzqOfBsFyGw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.1.tgz", + "integrity": "sha512-/UjuH/76KLaUlL+o1OvyORynv6FURzjurSjvn2lbWTFc4tpYY2qLYTlKpTCBVPhlLUQsfyFnshEJDLmPneq2oA==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "fs-extra": "^10.1.0", "tslib": "^2.4.0", "webpack": "^5.73.0" } }, "@docusaurus/plugin-debug": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.0.tgz", - "integrity": "sha512-KC56DdYjYT7Txyux71vXHXGYZuP6yYtqwClvYpjKreWIHWus5Zt6VNi23rMZv3/QKhOCrN64zplUbdfQMvddBQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.1.tgz", + "integrity": "sha512-7Yu9UPzRShlrH/G8btOpR0e6INFZr0EegWplMjOqelIwAcx3PKyR8mgPTxGTxcqiYj6hxSCRN0D8R7YrzImwNA==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", "fs-extra": "^10.1.0", "react-json-view": "^1.21.3", "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-analytics": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.0.tgz", - "integrity": "sha512-uGUzX67DOAIglygdNrmMOvEp8qG03X20jMWadeqVQktS6nADvozpSLGx4J0xbkblhJkUzN21WiilsP9iVP+zkw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.1.tgz", + "integrity": "sha512-dyZJdJiCoL+rcfnm0RPkLt/o732HvLiEwmtoNzOoz9MSZz117UH2J6U2vUDtzUzwtFLIf32KkeyzisbwUCgcaQ==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-gtag": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.0.tgz", - "integrity": "sha512-adj/70DANaQs2+TF/nRdMezDXFAV/O/pjAbUgmKBlyOTq5qoMe0Tk4muvQIwWUmiUQxFJe+sKlZGM771ownyOg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.1.tgz", + "integrity": "sha512-mKIefK+2kGTQBYvloNEKtDmnRD7bxHLsBcxgnbt4oZwzi2nxCGjPX6+9SQO2KCN5HZbNrYmGo5GJfMgoRvy6uA==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-tag-manager": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.0.tgz", - "integrity": "sha512-E66uGcYs4l7yitmp/8kMEVQftFPwV9iC62ORh47Veqzs6ExwnhzBkJmwDnwIysHBF1vlxnzET0Fl2LfL5fRR3A==", + "version": "2.4.1", + 
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.1.tgz", + "integrity": "sha512-Zg4Ii9CMOLfpeV2nG74lVTWNtisFaH9QNtEw48R5QE1KIwDBdTVaiSA18G1EujZjrzJJzXN79VhINSbOJO/r3g==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "tslib": "^2.4.0" } }, "@docusaurus/plugin-sitemap": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.0.tgz", - "integrity": "sha512-pZxh+ygfnI657sN8a/FkYVIAmVv0CGk71QMKqJBOfMmDHNN1FeDeFkBjWP49ejBqpqAhjufkv5UWq3UOu2soCw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.1.tgz", + "integrity": "sha512-lZx+ijt/+atQ3FVE8FOHV/+X3kuok688OydDXrqKRJyXBJZKgGjA2Qa8RjQ4f27V2woaXhtnyrdPop/+OjVMRg==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "fs-extra": "^10.1.0", "sitemap": "^7.1.1", "tslib": "^2.4.0" } }, "@docusaurus/preset-classic": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.0.tgz", - "integrity": "sha512-/5z5o/9bc6+P5ool2y01PbJhoGddEGsC0ej1MF6mCoazk8A+kW4feoUd68l7Bnv01rCnG3xy7kHUQP97Y0grUA==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.1.tgz", + "integrity": "sha512-P4//+I4zDqQJ+UDgoFrjIFaQ1MeS9UD1cvxVQaI6O7iBmiHQm0MGROP1TbE7HlxlDPXFJjZUK3x3cAoK63smGQ==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/plugin-debug": "2.4.0", - "@docusaurus/plugin-google-analytics": "2.4.0", - "@docusaurus/plugin-google-gtag": "2.4.0", - "@docusaurus/plugin-google-tag-manager": "2.4.0", - "@docusaurus/plugin-sitemap": "2.4.0", - "@docusaurus/theme-classic": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-search-algolia": "2.4.0", - "@docusaurus/types": "2.4.0" + "@docusaurus/core": "2.4.1", + "@docusaurus/plugin-content-blog": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/plugin-content-pages": "2.4.1", + "@docusaurus/plugin-debug": "2.4.1", + "@docusaurus/plugin-google-analytics": "2.4.1", + "@docusaurus/plugin-google-gtag": "2.4.1", + "@docusaurus/plugin-google-tag-manager": "2.4.1", + "@docusaurus/plugin-sitemap": "2.4.1", + "@docusaurus/theme-classic": "2.4.1", + "@docusaurus/theme-common": "2.4.1", + "@docusaurus/theme-search-algolia": "2.4.1", + "@docusaurus/types": "2.4.1" } }, "@docusaurus/react-loadable": { @@ -16430,22 +16491,22 @@ } }, "@docusaurus/theme-classic": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.0.tgz", - "integrity": "sha512-GMDX5WU6Z0OC65eQFgl3iNNEbI9IMJz9f6KnOyuMxNUR6q0qVLsKCNopFUDfFNJ55UU50o7P7o21yVhkwpfJ9w==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.1.tgz", + 
"integrity": "sha512-Rz0wKUa+LTW1PLXmwnf8mn85EBzaGSt6qamqtmnh9Hflkc+EqiYMhtUJeLdV+wsgYq4aG0ANc+bpUDpsUhdnwg==", "requires": { - "@docusaurus/core": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-translations": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/module-type-aliases": "2.4.1", + "@docusaurus/plugin-content-blog": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/plugin-content-pages": "2.4.1", + "@docusaurus/theme-common": "2.4.1", + "@docusaurus/theme-translations": "2.4.1", + "@docusaurus/types": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "@mdx-js/react": "^1.6.22", "clsx": "^1.2.1", "copy-text-to-clipboard": "^3.0.1", @@ -16462,17 +16523,17 @@ } }, "@docusaurus/theme-common": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.0.tgz", - "integrity": "sha512-IkG/l5f/FLY6cBIxtPmFnxpuPzc5TupuqlOx+XDN+035MdQcAh8wHXXZJAkTeYDeZ3anIUSUIvWa7/nRKoQEfg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.1.tgz", + "integrity": "sha512-G7Zau1W5rQTaFFB3x3soQoZpkgMbl/SYNG8PfMFIjKa3M3q8n0m/GRf5/H/e5BqOvt8c+ZWIXGCiz+kUCSHovA==", "requires": { - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/module-type-aliases": "2.4.1", + "@docusaurus/plugin-content-blog": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/plugin-content-pages": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -16485,18 +16546,18 @@ } }, "@docusaurus/theme-search-algolia": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.0.tgz", - "integrity": "sha512-pPCJSCL1Qt4pu/Z0uxBAuke0yEBbxh0s4fOvimna7TEcBLPq0x06/K78AaABXrTVQM6S0vdocFl9EoNgU17hqA==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.1.tgz", + "integrity": "sha512-6BcqW2lnLhZCXuMAvPRezFs1DpmEKzXFKlYjruuas+Xy3AQeFzDJKTJFIm49N77WFCTyxff8d3E4Q9pi/+5McQ==", "requires": { "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-translations": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", + "@docusaurus/core": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/plugin-content-docs": "2.4.1", + "@docusaurus/theme-common": "2.4.1", + "@docusaurus/theme-translations": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", "algoliasearch": "^4.13.1", 
"algoliasearch-helper": "^3.10.0", "clsx": "^1.2.1", @@ -16508,18 +16569,18 @@ } }, "@docusaurus/theme-translations": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.0.tgz", - "integrity": "sha512-kEoITnPXzDPUMBHk3+fzEzbopxLD3fR5sDoayNH0vXkpUukA88/aDL1bqkhxWZHA3LOfJ3f0vJbOwmnXW5v85Q==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.1.tgz", + "integrity": "sha512-T1RAGP+f86CA1kfE8ejZ3T3pUU3XcyvrGMfC/zxCtc2BsnoexuNI9Vk2CmuKCb+Tacvhxjv5unhxXce0+NKyvA==", "requires": { "fs-extra": "^10.1.0", "tslib": "^2.4.0" } }, "@docusaurus/types": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.0.tgz", - "integrity": "sha512-xaBXr+KIPDkIaef06c+i2HeTqVNixB7yFut5fBXPGI2f1rrmEV2vLMznNGsFwvZ5XmA3Quuefd4OGRkdo97Dhw==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz", + "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==", "requires": { "@types/history": "^4.7.11", "@types/react": "*", @@ -16532,11 +16593,11 @@ } }, "@docusaurus/utils": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.0.tgz", - "integrity": "sha512-89hLYkvtRX92j+C+ERYTuSUK6nF9bGM32QThcHPg2EDDHVw6FzYQXmX6/p+pU5SDyyx5nBlE4qXR92RxCAOqfg==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.1.tgz", + "integrity": "sha512-1lvEZdAQhKNht9aPXPoh69eeKnV0/62ROhQeFKKxmzd0zkcuE/Oc5Gpnt00y/f5bIsmOsYMY7Pqfm/5rteT5GA==", "requires": { - "@docusaurus/logger": "2.4.0", + "@docusaurus/logger": "2.4.1", "@svgr/webpack": "^6.2.1", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -16562,20 +16623,20 @@ } }, "@docusaurus/utils-common": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.0.tgz", - "integrity": "sha512-zIMf10xuKxddYfLg5cS19x44zud/E9I7lj3+0bv8UIs0aahpErfNrGhijEfJpAfikhQ8tL3m35nH3hJ3sOG82A==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.1.tgz", + "integrity": "sha512-bCVGdZU+z/qVcIiEQdyx0K13OC5mYwxhSuDUR95oFbKVuXYRrTVrwZIqQljuo1fyJvFTKHiL9L9skQOPokuFNQ==", "requires": { "tslib": "^2.4.0" } }, "@docusaurus/utils-validation": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.0.tgz", - "integrity": "sha512-IrBsBbbAp6y7mZdJx4S4pIA7dUyWSA0GNosPk6ZJ0fX3uYIEQgcQSGIgTeSC+8xPEx3c16o03en1jSDpgQgz/w==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.1.tgz", + "integrity": "sha512-unII3hlJlDwZ3w8U+pMO3Lx3RhI4YEbY3YNsQj4yzrkZzlpqZOLuAiZK2JyULnD+TKbceKU0WyWkQXtYbLNDFA==", "requires": { - "@docusaurus/logger": "2.4.0", - "@docusaurus/utils": "2.4.0", + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", "joi": "^17.6.0", "js-yaml": "^4.1.0", "tslib": "^2.4.0" @@ -17420,30 +17481,30 @@ "requires": {} }, "algoliasearch": { - "version": "4.16.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.16.0.tgz", - "integrity": "sha512-HAjKJ6bBblaXqO4dYygF4qx251GuJ6zCZt+qbJ+kU7sOC+yc84pawEjVpJByh+cGP2APFCsao2Giz50cDlKNPA==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.17.0.tgz", + "integrity": 
"sha512-JMRh2Mw6sEnVMiz6+APsi7lx9a2jiDFF+WUtANaUVCv6uSU9UOLdo5h9K3pdP6frRRybaM2fX8b1u0nqICS9aA==", "requires": { - "@algolia/cache-browser-local-storage": "4.16.0", - "@algolia/cache-common": "4.16.0", - "@algolia/cache-in-memory": "4.16.0", - "@algolia/client-account": "4.16.0", - "@algolia/client-analytics": "4.16.0", - "@algolia/client-common": "4.16.0", - "@algolia/client-personalization": "4.16.0", - "@algolia/client-search": "4.16.0", - "@algolia/logger-common": "4.16.0", - "@algolia/logger-console": "4.16.0", - "@algolia/requester-browser-xhr": "4.16.0", - "@algolia/requester-common": "4.16.0", - "@algolia/requester-node-http": "4.16.0", - "@algolia/transporter": "4.16.0" + "@algolia/cache-browser-local-storage": "4.17.0", + "@algolia/cache-common": "4.17.0", + "@algolia/cache-in-memory": "4.17.0", + "@algolia/client-account": "4.17.0", + "@algolia/client-analytics": "4.17.0", + "@algolia/client-common": "4.17.0", + "@algolia/client-personalization": "4.17.0", + "@algolia/client-search": "4.17.0", + "@algolia/logger-common": "4.17.0", + "@algolia/logger-console": "4.17.0", + "@algolia/requester-browser-xhr": "4.17.0", + "@algolia/requester-common": "4.17.0", + "@algolia/requester-node-http": "4.17.0", + "@algolia/transporter": "4.17.0" } }, "algoliasearch-helper": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.12.0.tgz", - "integrity": "sha512-/j1U3PEwdan0n6P/QqSnSpNSLC5+cEMvyljd5CnmNmUjDlGrys+vFEOwjVEnqELIiAGMHEA/Nl3CiKVFBUYqyQ==", + "version": "3.13.0", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.13.0.tgz", + "integrity": "sha512-kV3c1jMQCvkARtGsSDvAwuht4PAMSsQILqPiH4WFiARoa3jXJ/r1TQoBWAjWyWF48rsNYCv7kzxgB4LTxrvvuw==", "requires": { "@algolia/events": "^4.0.1" } @@ -17995,19 +18056,19 @@ } }, "domutils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", "requires": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" + "domhandler": "^5.0.3" } }, "entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==" }, "htmlparser2": { "version": "8.0.2", @@ -18066,19 +18127,19 @@ } }, "domutils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", "requires": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" + "domhandler": "^5.0.3" } }, "entities": { - "version": "4.4.0", - "resolved": 
"https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==" } } }, @@ -18439,11 +18500,11 @@ } }, "cross-fetch": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", - "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.6.tgz", + "integrity": "sha512-riRvo06crlE8HiqOwIpQhxwdOk4fOeR7FVM/wXoxchFEqMNUjvbs3bfo4OTgMEMHzppd4DxFBDbyySj8Cv781g==", "requires": { - "node-fetch": "2.6.7" + "node-fetch": "^2.6.11" } }, "cross-spawn": { @@ -20508,7 +20569,9 @@ } }, "is-core-module": { - "version": "2.9.0", + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.0.tgz", + "integrity": "sha512-RECHCBCd/viahWmwj6enj19sKbHfJrddi/6cBDsNTKbNq0f7VeaUkBo60BqzvPqo/W54ChS62Z5qyun7cfOMqQ==", "requires": { "has": "^1.0.3" } @@ -20799,9 +20862,9 @@ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==" }, "lilconfig": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.0.6.tgz", - "integrity": "sha512-9JROoBW7pobfsx+Sq2JsASvCo6Pfo6WWoUW79HuB1BCoBXD4PLWJPqDF6fNj67pqBYTbAHkE57M1kS/+L1neOg==" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==" }, "lines-and-columns": { "version": "1.2.4", @@ -21240,9 +21303,9 @@ } }, "node-fetch": { - "version": "2.6.7", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", - "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w==", "requires": { "whatwg-url": "^5.0.0" } @@ -21490,9 +21553,9 @@ }, "dependencies": { "entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==" } } }, @@ -21648,9 +21711,9 @@ "integrity": "sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ==" }, "postcss": { - "version": "8.4.22", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.22.tgz", - "integrity": "sha512-XseknLAfRHzVWjCEtdviapiBtfLdgyzExD50Rg2ePaucEesyh8Wv4VPdW0nbyDa1ydbrAxV19jvMT4+LFmcNUA==", + "version": "8.4.23", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.23.tgz", + "integrity": "sha512-bQ3qMcpF6A/YjR55xtoTr0jGOlnPOKAIMdOWiv0EIT6HVPEaJiJB4NLljSbiHoC2RX7DN5Uvjtpbg1NPdwv1oA==", "requires": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", @@ -21719,9 +21782,9 @@ 
} }, "postcss-import": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-14.1.0.tgz", - "integrity": "sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw==", + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", "dev": true, "requires": { "postcss-value-parser": "^4.0.0", @@ -21730,9 +21793,9 @@ } }, "postcss-js": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.0.tgz", - "integrity": "sha512-77QESFBwgX4irogGVPgQ5s07vLvFqWr228qZY+w6lW599cRlK/HmnlivnnVUxkjHnCu4J16PDMHcH+e+2HbvTQ==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", "requires": { "camelcase-css": "^2.0.1" } @@ -21854,12 +21917,12 @@ } }, "postcss-nested": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.0.tgz", - "integrity": "sha512-0DkamqrPcmkBDsLn+vQDIrtkSbNkv5AD/M322ySo9kqFkCIYklym2xEmWkwo+Y3/qZo34tzEPNUw4y7yMCdv5w==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", + "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", "dev": true, "requires": { - "postcss-selector-parser": "^6.0.10" + "postcss-selector-parser": "^6.0.11" } }, "postcss-normalize-charset": { @@ -21978,9 +22041,9 @@ } }, "postcss-sort-media-queries": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.3.0.tgz", - "integrity": "sha512-jAl8gJM2DvuIJiI9sL1CuiHtKM4s5aEIomkU8G3LFvbP+p8i7Sz8VV63uieTgoewGqKbi+hxBTiOKJlB35upCg==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", "requires": { "sort-css-media-queries": "2.1.0" } @@ -22734,11 +22797,11 @@ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" }, "resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", + "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", "requires": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.11.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" } @@ -22865,9 +22928,9 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sass": { - "version": "1.62.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.0.tgz", - "integrity": "sha512-Q4USplo4pLYgCi+XlipZCWUQz5pkg/ruSSgJ0WRDSb/+3z9tXUOkQ7QPYn4XrhZKYAK4HlpaQecRwKLJX6+DBg==", + "version": "1.62.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", + "integrity": 
"sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", "requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -23387,11 +23450,12 @@ "integrity": "sha512-GP6WDNWf+o403jrEp9c5jibKavrtLW+/qYGhFxFrG8maXhwTBI7gLLhiBb0o7uFccWN+EOS9aMO6cGHWAO07OA==" }, "sucrase": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.31.0.tgz", - "integrity": "sha512-6QsHnkqyVEzYcaiHsOKkzOtOgdJcb8i54x6AV2hDwyZcY9ZyykGZVw6L/YN98xC0evwTP6utsWWrKRaa8QlfEQ==", + "version": "3.32.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.32.0.tgz", + "integrity": "sha512-ydQOU34rpSyj2TGyz4D2p8rbktIOZ8QY9s+DGLvFU1i5pWJE8vkpruCjGCMHsdXwnD7JDcS+noSwM/a7zyNFDQ==", "dev": true, "requires": { + "@jridgewell/gen-mapping": "^0.3.2", "commander": "^4.0.0", "glob": "7.1.6", "lines-and-columns": "^1.1.6", @@ -23400,6 +23464,17 @@ "ts-interface-checker": "^0.1.9" }, "dependencies": { + "@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dev": true, + "requires": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, "commander": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", @@ -23460,43 +23535,36 @@ } }, "tailwindcss": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.1.tgz", - "integrity": "sha512-Vkiouc41d4CEq0ujXl6oiGFQ7bA3WEhUZdTgXAhtKxSy49OmKs8rEfQmupsfF0IGW8fv2iQkp1EVUuapCFrZ9g==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.2.tgz", + "integrity": "sha512-9jPkMiIBXvPc2KywkraqsUfbfj+dHDb+JPWtSJa9MLFdrPyazI7q6WX2sUrm7R9eVR7qqv3Pas7EvQFzxKnI6w==", "dev": true, "requires": { + "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", "chokidar": "^3.5.3", - "color-name": "^1.1.4", "didyoumean": "^1.2.2", "dlv": "^1.1.3", "fast-glob": "^3.2.12", "glob-parent": "^6.0.2", "is-glob": "^4.0.3", - "jiti": "^1.17.2", - "lilconfig": "^2.0.6", + "jiti": "^1.18.2", + "lilconfig": "^2.1.0", "micromatch": "^4.0.5", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.0.0", - "postcss": "^8.0.9", - "postcss-import": "^14.1.0", - "postcss-js": "^4.0.0", - "postcss-load-config": "^3.1.4", - "postcss-nested": "6.0.0", + "postcss": "^8.4.23", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.1", + "postcss-nested": "^6.0.1", "postcss-selector-parser": "^6.0.11", "postcss-value-parser": "^4.2.0", - "quick-lru": "^5.1.1", - "resolve": "^1.22.1", - "sucrase": "^3.29.0" + "resolve": "^1.22.2", + "sucrase": "^3.32.0" }, "dependencies": { - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, "glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -23505,6 +23573,22 @@ "requires": { "is-glob": "^4.0.3" } + }, + "postcss-load-config": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.1.tgz", + "integrity": 
"sha512-vEJIc8RdiBRu3oRAI0ymerOn+7rPuMvRXslTvZUKZonDHFIczxztIyJ1urxM1x9JXEikvpWWTUUqal5j/8QgvA==", + "dev": true, + "requires": { + "lilconfig": "^2.0.5", + "yaml": "^2.1.1" + } + }, + "yaml": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.2.2.tgz", + "integrity": "sha512-CBKFWExMn46Foo4cldiChEzn7S7SRV+wqiluAb6xmueD/fGyRHIhX8m14vVGgeFWjN540nKCNVj6P21eQjgTuA==", + "dev": true } } }, @@ -23760,9 +23844,9 @@ "peer": true }, "ua-parser-js": { - "version": "0.7.34", - "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.34.tgz", - "integrity": "sha512-cJMeh/eOILyGu0ejgTKB95yKT3zOenSe9UGE3vj6WfiOwgGYnmATUsnDixMFvdU+rNMvWih83hrUP8VwhF9yXQ==" + "version": "0.7.35", + "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.35.tgz", + "integrity": "sha512-veRf7dawaj9xaWEu9HoTVn5Pggtc/qj+kqTOFvNiN1l0YdxwC1kvel57UCjThjGa3BHBihE8/UJAHI+uQHmd/g==" }, "unherit": { "version": "1.1.3", diff --git a/website/package.json b/website/package.json index 8aac3730a..5cbb3db04 100644 --- a/website/package.json +++ b/website/package.json @@ -14,9 +14,9 @@ "write-heading-ids": "docusaurus write-heading-ids" }, "dependencies": { - "@docusaurus/core": "2.4.0", + "@docusaurus/core": "2.4.1", "@docusaurus/plugin-google-gtag": "^2.4.0", - "@docusaurus/preset-classic": "2.4.0", + "@docusaurus/preset-classic": "2.4.1", "@loadable/component": "^5.15.3", "@mdx-js/react": "^1.6.22", "animate.css": "^4.1.1", @@ -30,17 +30,17 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.62.0", + "sass": "^1.62.1", "tiny-slider": "^2.9.4", "tw-elements": "^1.0.0-alpha13", "wow.js": "^1.2.2" }, "devDependencies": { - "@docusaurus/module-type-aliases": "2.4.0", + "@docusaurus/module-type-aliases": "2.4.1", "@iconify/react": "^4.1.0", "autoprefixer": "^10.4.14", - "postcss": "^8.4.22", - "tailwindcss": "^3.3.1" + "postcss": "^8.4.23", + "tailwindcss": "^3.3.2" }, "browserslist": { "production": [ diff --git a/website/styles/Vocab/Base/accept.txt b/website/styles/Vocab/Base/accept.txt index 7f8d159c7..4ed3b41bf 100644 --- a/website/styles/Vocab/Base/accept.txt +++ b/website/styles/Vocab/Base/accept.txt @@ -54,4 +54,7 @@ Demetrius Malbrough lockdowns exfiltrate -deduplicating \ No newline at end of file +deduplicating +subtree +subtrees +anonymized \ No newline at end of file