merge main (commit c4e0566433)
.github/workflows/_filechange_checker.yml (13 changes)

@@ -9,6 +9,9 @@ on:
       websitefileschanged:
         description: "'true' if websites/** or .github/workflows/** files have changed in the branch"
         value: ${{ jobs.file-change-check.outputs.websitefileschanged }}
+      actionsfileschanged:
+        description: "'true' if .github/actions/** or .github/workflows/** files have changed in the branch"
+        value: ${{ jobs.file-change-check.outputs.actionsfileschanged }}
 
 jobs:
   file-change-check:
@@ -19,6 +22,7 @@ jobs:
     outputs:
       srcfileschanged: ${{ steps.srcchecker.outputs.srcfileschanged }}
       websitefileschanged: ${{ steps.websitechecker.outputs.websitefileschanged }}
+      actionsfileschanged: ${{ steps.actionschecker.outputs.actionsfileschanged }}
     steps:
       - uses: actions/checkout@v3
 
@@ -49,4 +53,11 @@ jobs:
         if: steps.dornycheck.outputs.src == 'true' || steps.dornycheck.outputs.website == 'true' || steps.dornycheck.outputs.actions == 'true'
         run: |
           echo "website or workflow file changes occurred"
          echo websitefileschanged=true >> $GITHUB_OUTPUT
+
+      - name: Check dorny for changes in actions filepaths
+        id: actionschecker
+        if: steps.dornycheck.outputs.actions == 'true'
+        run: |
+          echo "actions file changes occurred"
+          echo actionsfileschanged=true >> $GITHUB_OUTPUT
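The `steps.dornycheck.outputs.actions` value referenced above comes from a path-filter step that these hunks don't show. As a rough sketch, assuming the `dornycheck` step uses dorny/paths-filter (the step id suggests it, and the filter names must match the outputs consumed above), the new `actions` filter would look something like:

    # hypothetical sketch of the step these hunks reference, not shown in this diff
    - uses: dorny/paths-filter@v2
      id: dornycheck
      with:
        filters: |
          actions:
            - ".github/actions/**"
            - ".github/workflows/**"

dorny/paths-filter exposes one 'true'/'false' output per named filter, which is what the `if:` conditions on the checker steps read.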

.github/workflows/ci.yml (32 changes)

@@ -364,7 +364,7 @@ jobs:
   # --- Source Code Linting ----------------------------------------------------------------------------
   # ----------------------------------------------------------------------------------------------------
 
-  Linting:
+  Source-Code-Linting:
     needs: [Precheck, Checkout]
     environment: Testing
     runs-on: ubuntu-latest
@@ -404,12 +404,36 @@ jobs:
         working-directory: src
 
 
+  # ----------------------------------------------------------------------------------------------------
+  # --- GitHub Actions Linting -------------------------------------------------------------------------
+  # ----------------------------------------------------------------------------------------------------
+
+  Actions-Lint:
+    needs: [Precheck]
+    environment: Testing
+    runs-on: ubuntu-latest
+    if: needs.precheck.outputs.actionsfileschanged == 'true'
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: actionlint
+        uses: raven-actions/actionlint@v1
+        with:
+          fail-on-error: true
+          cache: true
+          # Ignore
+          # * combining commands into a subshell and using single output
+          #   redirect
+          # * various variable quoting patterns
+          # * possible ineffective echo commands
+          flags: "-ignore SC2129 -ignore SC2086 -ignore SC2046 -ignore 2116"
+
   # ----------------------------------------------------------------------------------------------------
   # --- Publish steps ----------------------------------------------------------------------------------
   # ----------------------------------------------------------------------------------------------------
 
   Publish-Binary:
-    needs: [Test-Suite-Trusted, Linting, Website-Linting, SetEnv]
+    needs: [Test-Suite-Trusted, Source-Code-Linting, Website-Linting, SetEnv]
     environment: ${{ needs.SetEnv.outputs.environment }}
     runs-on: ubuntu-latest
     if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/main'
@@ -426,7 +450,7 @@ jobs:
           rudderstack_data_plane_url: ${{ secrets.RUDDERSTACK_CORSO_DATA_PLANE_URL }}
 
   Publish-Image:
-    needs: [Test-Suite-Trusted, Linting, Website-Linting, SetEnv]
+    needs: [Test-Suite-Trusted, Source-Code-Linting, Website-Linting, SetEnv]
     environment: ${{ needs.SetEnv.outputs.environment }}
     runs-on: ubuntu-latest
     if: startsWith(github.ref, 'refs/tags/')
@@ -568,7 +592,7 @@ jobs:
           ./corso.exe --version 2>&1 | grep -E "version: ${{ env.CORSO_VERSION }}$"
 
   Publish-Website-Test:
-    needs: [Test-Suite-Trusted, Linting, Website-Linting, SetEnv]
+    needs: [Test-Suite-Trusted, Source-Code-Linting, Website-Linting, SetEnv]
     environment: ${{ needs.SetEnv.outputs.environment }}
     runs-on: ubuntu-latest
     if: github.ref == 'refs/heads/main'
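The new `Actions-Lint` job gates on `needs.precheck.outputs.actionsfileschanged`, the output added to the reusable checker above. The `precheck` job itself sits in an unchanged part of ci.yml; a minimal sketch of how such a job would surface that output, assuming it invokes the checker as a reusable workflow:

    # hypothetical sketch; the real precheck job is defined in an unchanged part of ci.yml
    precheck:
      uses: ./.github/workflows/_filechange_checker.yml

A job that calls a reusable workflow re-exposes that workflow's `workflow_call` outputs directly, so dependents can read `needs.precheck.outputs.actionsfileschanged` without extra wiring.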
.github/workflows/ci_test_cleanup.yml (1 change)

@@ -1,5 +1,6 @@
 name: CI Test Cleanup
 on:
+  workflow_dispatch:
   schedule:
     # every half hour
     - cron: "*/30 * * * *"
.github/workflows/load_test.yml (10 changes)

@@ -1,10 +1,8 @@
 name: Nightly Load Testing
 on:
   schedule:
-    # every day at 01:59 (01:59am) UTC
-    # - cron: "59 1 * * *"
-    # temp, for testing: every 4 hours
-    - cron: "0 */4 * * *"
+    # every day at 03:59 GMT (roughly 8pm PST)
+    - cron: "59 3 * * *"
 
 permissions:
   # required to retrieve AWS credentials
@@ -20,6 +18,10 @@ jobs:
   Load-Tests:
     environment: Load Testing
     runs-on: ubuntu-latest
+    # Skipping load testing for now. They need some love to get up and
+    # running properly, and it's better to not fight for resources with
+    # tests that are guaranteed to fail.
+    if: false
     defaults:
       run:
         working-directory: src
.github/workflows/nightly_test.yml (6 changes)

@@ -3,8 +3,8 @@ on:
   workflow_dispatch:
 
   schedule:
-    # Run every day at 0 minutes and 0 hours (midnight GMT)
-    - cron: "0 0 * * *"
+    # Run every day at 04:00 GMT (roughly 8pm PST)
+    - cron: "0 4 * * *"
 
 permissions:
   # required to retrieve AWS credentials
@@ -50,7 +50,6 @@ jobs:
       environment: ${{ steps.environment.outputs.environment }}
       version: ${{ steps.version.outputs.version }}
       website-bucket: ${{ steps.website-bucket.outputs.website-bucket }}
-      website-cfid: ${{ steps.website-cfid.outputs.website-cfid }}
     steps:
       - uses: actions/checkout@v3
 
@@ -122,6 +121,7 @@ jobs:
       AZURE_CLIENT_SECRET: ${{ secrets[env.AZURE_CLIENT_SECRET_NAME] }}
       AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
       CORSO_NIGHTLY_TESTS: true
+      CORSO_E2E_TESTS: true
       CORSO_M365_TEST_USER_ID: ${{ vars.CORSO_M365_TEST_USER_ID }}
       CORSO_SECONDARY_M365_TEST_USER_ID: ${{ vars.CORSO_SECONDARY_M365_TEST_USER_ID }}
       CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
.github/workflows/sanity-test.yaml (55 changes)

@@ -36,6 +36,7 @@ jobs:
       CORSO_LOG_DIR: testlog
       CORSO_LOG_FILE: testlog/testlogging.log
       TEST_USER: ${{ github.event.inputs.user != '' && github.event.inputs.user || secrets.CORSO_M365_TEST_USER_ID }}
+      TEST_SITE: ${{ secrets.CORSO_M365_TEST_SITE_URL }}
       SECONDARY_TEST_USER : ${{ secrets.CORSO_SECONDARY_M365_TEST_USER_ID }}
       CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
       TEST_RESULT: test_results
@@ -65,7 +66,7 @@ jobs:
       - name: Version Test
         run: |
           set -euo pipefail
-          if [ $( ./corso --version | grep 'Corso version:' | wc -l) -ne 1 ]
+          if [ $( ./corso --version | grep -c 'Corso version:' ) -ne 1 ]
           then
             echo "valid version not found"
             exit 1
@@ -77,7 +78,7 @@ jobs:
           TEST_RESULT: "test_results"
         run: |
           set -euo pipefail
-          prefix=`date +"%Y-%m-%d-%T"`
+          prefix=$(date +"%Y-%m-%d-%T")
           echo -e "\nRepo init test\n" >> ${CORSO_LOG_FILE}
           ./corso repo init s3 \
             --no-stats \
@@ -123,9 +124,9 @@ jobs:
           AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
         run: |
           go run . exchange emails \
-            --user ${{ env.TEST_USER }} \
+            --user ${TEST_USER} \
             --tenant ${{ env.AZURE_TENANT_ID }} \
-            --destination Corso_Restore_st_${{ steps.repo-init.outputs.result }} \
+            --destination Corso_Test_sanity${{ steps.repo-init.outputs.result }} \
             --count 4
 
       - name: Backup exchange test
@@ -191,7 +192,7 @@ jobs:
           echo -e "\nBackup Exchange restore test\n" >> ${CORSO_LOG_FILE}
           ./corso restore exchange \
             --no-stats \
-            --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \
+            --email-folder Corso_Test_sanity${{ steps.repo-init.outputs.result }} \
             --hide-progress \
             --backup "${{ steps.exchange-test.outputs.result }}" \
             2>&1 | tee $TEST_RESULT/exchange-restore-test.txt
@@ -201,7 +202,7 @@ jobs:
         env:
           SANITY_RESTORE_FOLDER: ${{ steps.exchange-restore-test.outputs.result }}
           SANITY_RESTORE_SERVICE: "exchange"
-          TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }}
+          TEST_DATA: Corso_Test_sanity${{ steps.repo-init.outputs.result }}
         run: |
           set -euo pipefail
           ./sanityTest
@@ -238,7 +239,7 @@ jobs:
             --no-stats \
             --hide-progress \
             --backup "${{ steps.exchange-incremental-test.outputs.result }}" \
-            --email-folder Corso_Restore_st_${{ steps.repo-init.outputs.result }} \
+            --email-folder Corso_Test_sanity${{ steps.repo-init.outputs.result }} \
             2>&1 | tee $TEST_RESULT/exchange-incremantal-restore-test.txt
           echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/exchange-incremantal-restore-test.txt | sed "s/Restoring to folder//" ) >> $GITHUB_OUTPUT
 
@@ -246,7 +247,7 @@ jobs:
         env:
           SANITY_RESTORE_FOLDER: ${{ steps.exchange-incremantal-restore-test.outputs.result }}
           SANITY_RESTORE_SERVICE: "exchange"
-          TEST_DATA: Corso_Restore_st_${{ steps.repo-init.outputs.result }}
+          TEST_DATA: Corso_Test_sanity${{ steps.repo-init.outputs.result }}
           BASE_BACKUP: ${{ steps.exchange-restore-test.outputs.result }}
         run: |
           set -euo pipefail
@@ -265,13 +266,13 @@ jobs:
           AZURE_CLIENT_SECRET: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }}
           AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
         run: |
-          suffix=`date +"%Y-%m-%d_%H-%M"`
+          suffix=$(date +"%Y-%m-%d_%H-%M")
 
           go run . onedrive files \
-            --user ${{ env.TEST_USER }} \
+            --user ${TEST_USER} \
             --secondaryuser ${{ env.SECONDARY_TEST_USER }} \
             --tenant ${{ env.AZURE_TENANT_ID }} \
-            --destination Corso_Restore_st_$suffix \
+            --destination Corso_Test_sanity$suffix \
             --count 4
 
           echo result="$suffix" >> $GITHUB_OUTPUT
@@ -340,7 +341,7 @@ jobs:
           ./corso restore onedrive \
             --no-stats \
             --restore-permissions \
-            --folder Corso_Restore_st_${{ steps.new-data-creation-onedrive.outputs.result }} \
+            --folder Corso_Test_sanity${{ steps.new-data-creation-onedrive.outputs.result }} \
             --hide-progress \
             --backup "${{ steps.onedrive-test.outputs.result }}" \
             2>&1 | tee $TEST_RESULT/onedrive-restore-test.txt
@@ -350,7 +351,7 @@ jobs:
         env:
           SANITY_RESTORE_FOLDER: ${{ steps.onedrive-restore-test.outputs.result }}
           SANITY_RESTORE_SERVICE: "onedrive"
-          TEST_DATA: Corso_Restore_st_${{ steps.new-data-creation-onedrive.outputs.result }}
+          TEST_DATA: Corso_Test_sanity${{ steps.new-data-creation-onedrive.outputs.result }}
         run: |
           set -euo pipefail
           ./sanityTest
@@ -364,10 +365,10 @@ jobs:
           AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
         run: |
           go run . onedrive files \
-            --user ${{ env.TEST_USER }} \
+            --user ${TEST_USER} \
             --secondaryuser ${{ env.SECONDARY_TEST_USER }} \
             --tenant ${{ env.AZURE_TENANT_ID }} \
-            --destination Corso_Restore_st_${{ steps.new-data-creation-onedrive.outputs.result }} \
+            --destination Corso_Test_sanity${{ steps.new-data-creation-onedrive.outputs.result }} \
             --count 4
 
           # incremental backup
@@ -403,7 +404,7 @@ jobs:
             --no-stats \
             --restore-permissions \
             --hide-progress \
-            --folder Corso_Restore_st_${{ steps.new-data-creation-onedrive.outputs.result }} \
+            --folder Corso_Test_sanity${{ steps.new-data-creation-onedrive.outputs.result }} \
             --backup "${{ steps.onedrive-incremental-test.outputs.result }}" \
             2>&1 | tee $TEST_RESULT/onedrive-incremental-restore-test.txt
           echo result=$(grep -i -e 'Restoring to folder ' $TEST_RESULT/onedrive-incremental-restore-test.txt | sed "s/Restoring to folder//") >> $GITHUB_OUTPUT
@@ -412,7 +413,7 @@ jobs:
         env:
           SANITY_RESTORE_FOLDER: ${{ steps.onedrive-incremental-restore-test.outputs.result }}
           SANITY_RESTORE_SERVICE: "onedrive"
-          TEST_DATA: Corso_Restore_st_${{ steps.new-data-creation-onedrive.outputs.result }}
+          TEST_DATA: Corso_Test_sanity${{ steps.new-data-creation-onedrive.outputs.result }}
         run: |
           set -euo pipefail
           ./sanityTest
@@ -427,18 +428,18 @@ jobs:
         id: sharepoint-test
         run: |
           set -euo pipefail
           echo -e "\nBackup SharePoint test\n" >> ${CORSO_LOG_FILE}
 
           ./corso backup create sharepoint \
             --no-stats \
             --hide-progress \
-            --site "${CORSO_M365_TEST_SITE_URL}" \
+            --site "${TEST_SITE}" \
             --json \
             2>&1 | tee $TEST_RESULT/backup_sharepoint.txt
 
           resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_sharepoint.txt )
 
-          if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then
+          if [[ $( echo $resultjson | jq -r '.[0] | .stats.errorCount') -ne 0 ]]; then
             echo "backup was not successful"
             exit 1
           fi
@@ -450,7 +451,7 @@ jobs:
       - name: Backup sharepoint list test
         run: |
           set -euo pipefail
           echo -e "\nBackup List SharePoint test\n" >> ${CORSO_LOG_FILE}
 
           ./corso backup list sharepoint \
             --no-stats \
@@ -467,7 +468,7 @@ jobs:
       - name: Backup sharepoint list single backup test
         run: |
           set -euo pipefail
           echo -e "\nBackup List single backup SharePoint test\n" >> ${CORSO_LOG_FILE}
 
           ./corso backup list sharepoint \
             --no-stats \
@@ -486,7 +487,7 @@ jobs:
         id: sharepoint-restore-test
         run: |
           set -euo pipefail
           echo -e "\nRestore SharePoint test\n" >> ${CORSO_LOG_FILE}
 
           ./corso restore sharepoint \
             --no-stats \
@@ -513,18 +514,18 @@ jobs:
         id: sharepoint-incremental-test
         run: |
           set -euo pipefail
           echo -e "\nIncremental Backup SharePoint test\n" >> ${CORSO_LOG_FILE}
 
           ./corso backup create sharepoint \
             --no-stats \
             --hide-progress \
-            --site "${CORSO_M365_TEST_SITE_URL}" \
+            --site "${TEST_SITE}" \
             --json \
             2>&1 | tee $TEST_RESULT/backup_sharepoint_incremental.txt
 
           resultjson=$(sed -e '1,/Completed Backups/d' $TEST_RESULT/backup_sharepoint_incremental.txt )
 
-          if [[ $( echo $resultjson | jq -r '.[0] | .errorCount') -ne 0 ]]; then
+          if [[ $( echo $resultjson | jq -r '.[0] | .stats.errorCount') -ne 0 ]]; then
             echo "backup was not successful"
             exit 1
           fi
@@ -537,7 +538,7 @@ jobs:
         id: sharepoint-incremental-restore-test
         run: |
           set -euo pipefail
           echo -e "\nIncremental Restore SharePoint test\n" >> ${CORSO_LOG_FILE}
 
           ./corso restore sharepoint \
             --no-stats \
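Several hunks above swap `${{ env.TEST_USER }}` for plain `${TEST_USER}`. Both resolve to the same value here, but the difference matters for the new actionlint job: `${{ ... }}` is substituted into the script text by the workflow engine before the shell ever runs, while `${TEST_USER}` is ordinary shell expansion of the exported job env var, which the shellcheck pass can reason about. A minimal sketch of the distinction, with a made-up variable name:

    env:
      GREETING: hello
    steps:
      - run: |
          echo "${{ env.GREETING }}"   # replaced in the script text before execution
          echo "${GREETING}"           # expanded by the shell at run time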
.github/workflows/website-publish.yml (3 changes)

@@ -28,8 +28,7 @@ jobs:
       - name: Get version string
         id: version
         run: |
-          echo "set-output name=version::$(git describe --tags --abbrev=0)"
-          echo "::set-output name=version::$(git describe --tags --abbrev=0)"
+          echo version=$(git describe --tags --abbrev=0) | tee -a $GITHUB_OUTPUT
 
   # ----------------------------------------------------------------------------------------------------
   # --- Website Linting -----------------------------------------------------------------------------------
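The dropped `::set-output` workflow command is GitHub's deprecated mechanism for step outputs; its replacement is appending `name=value` lines to the file named by `$GITHUB_OUTPUT`, as the new line above does. Consumers read the value the same way under either scheme. A minimal sketch:

    steps:
      - id: version
        run: echo "version=$(git describe --tags --abbrev=0)" >> "$GITHUB_OUTPUT"
      - run: echo "publishing ${{ steps.version.outputs.version }}"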
@@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased] (beta)
 
 ### Added
+- Released the --mask-sensitive-data flag, which will automatically obscure private data in logs.
 
 ### Fixed
 - Graph requests now automatically retry in case of a Bad Gateway or Gateway Timeout.
@@ -17,6 +18,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Improve restore time on large restores by optimizing how items are loaded from the remote repository.
 - Remove exchange item filtering based on m365 item ID via the CLI.
 - OneDrive backups no longer include a user's non-default drives.
+- OneDrive and SharePoint file downloads will properly redirect from 3xx responses.
+- Refined oneDrive rate limiter controls to reduce throttling errors.
+- Fix handling of duplicate folders at the same hierarchy level in Exchange. Duplicate folders will be merged during restore operations.
+
+### Known Issues
+- Restore operations will merge duplicate Exchange folders at the same hierarchy level into a single folder.
 
 ## [v0.7.0] (beta) - 2023-05-02
 
@@ -207,7 +207,7 @@ func runBackups(
 
 	var (
 		owner = discSel.DiscreteOwner
-		ictx  = clues.Add(ctx, "resource_owner", owner)
+		ictx  = clues.Add(ctx, "resource_owner_selected", owner)
 	)
 
 	bo, err := r.NewBackupWithLookup(ictx, discSel, ins)
@@ -218,6 +218,11 @@ func runBackups(
 			continue
 		}
 
+		ictx = clues.Add(
+			ctx,
+			"resource_owner_id", bo.ResourceOwner.ID(),
+			"resource_owner_name", bo.ResourceOwner.Name())
+
 		err = bo.Run(ictx)
 		if err != nil {
 			errs = append(errs, clues.Wrap(err, owner).WithClues(ictx))
@@ -54,7 +54,6 @@ func TestNoBackupExchangeE2ESuite(t *testing.T) {
 	suite.Run(t, &NoBackupExchangeE2ESuite{Suite: tester.NewE2ESuite(
 		t,
 		[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-		tester.CorsoCITests,
 	)})
 }
 
@@ -120,7 +119,6 @@ func TestBackupExchangeE2ESuite(t *testing.T) {
 	suite.Run(t, &BackupExchangeE2ESuite{Suite: tester.NewE2ESuite(
 		t,
 		[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-		tester.CorsoCITests,
 	)})
 }
 
@@ -235,7 +233,6 @@ func TestPreparedBackupExchangeE2ESuite(t *testing.T) {
 	suite.Run(t, &PreparedBackupExchangeE2ESuite{Suite: tester.NewE2ESuite(
 		t,
 		[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-		tester.CorsoCITests,
 	)})
 }
 
@@ -490,7 +487,6 @@ func TestBackupDeleteExchangeE2ESuite(t *testing.T) {
 		Suite: tester.NewE2ESuite(
 			t,
 			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-			tester.CorsoCITests,
 		),
 	})
 }
@@ -22,6 +22,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
+	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
 	"github.com/alcionai/corso/src/pkg/storage"
 )
 
@@ -44,9 +45,7 @@ func TestNoBackupOneDriveE2ESuite(t *testing.T) {
 	suite.Run(t, &NoBackupOneDriveE2ESuite{
 		Suite: tester.NewE2ESuite(
 			t,
-			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-			tester.CorsoCITests,
-		),
+			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}),
 	})
 }
 
@@ -148,9 +147,7 @@ func TestBackupDeleteOneDriveE2ESuite(t *testing.T) {
 	suite.Run(t, &BackupDeleteOneDriveE2ESuite{
 		Suite: tester.NewE2ESuite(
 			t,
-			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-			tester.CorsoCITests,
-		),
+			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}),
 	})
 }
 
@@ -176,7 +173,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() {
 
 	// some tests require an existing backup
 	sel := selectors.NewOneDriveBackup(users)
-	sel.Include(sel.Folders(selectors.Any()))
+	sel.Include(selTD.OneDriveBackupFolderScope(sel))
 
 	backupOp, err := suite.repo.NewBackupWithLookup(ctx, sel.Selector, ins)
 	require.NoError(t, err, clues.ToCore(err))
@@ -45,7 +45,6 @@ func TestNoBackupSharePointE2ESuite(t *testing.T) {
 	suite.Run(t, &NoBackupSharePointE2ESuite{Suite: tester.NewE2ESuite(
 		t,
 		[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-		tester.CorsoCITests,
 	)})
 }
 
@@ -112,9 +111,7 @@ func TestBackupDeleteSharePointE2ESuite(t *testing.T) {
 	suite.Run(t, &BackupDeleteSharePointE2ESuite{
 		Suite: tester.NewE2ESuite(
 			t,
-			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-			tester.CorsoCITests,
-		),
+			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}),
 	})
 }
@@ -25,7 +25,6 @@ func TestS3E2ESuite(t *testing.T) {
 	suite.Run(t, &S3E2ESuite{Suite: tester.NewE2ESuite(
 		t,
 		[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-		tester.CorsoCITests,
 	)})
 }
@@ -48,9 +48,7 @@ func TestRestoreExchangeE2ESuite(t *testing.T) {
 	suite.Run(t, &RestoreExchangeE2ESuite{
 		Suite: tester.NewE2ESuite(
 			t,
-			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs},
-			tester.CorsoCITests,
-		),
+			[][]string{tester.AWSStorageCredEnvs, tester.M365AcctCredEnvs}),
 	})
 }
@@ -10,6 +10,7 @@ import (
 
 	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/pkg/path"
+	"github.com/alcionai/corso/src/pkg/selectors"
 )
 
 // common flag vars (eg: FV)
@@ -215,6 +216,11 @@ func trimFolderSlash(folders []string) []string {
 	res := make([]string, 0, len(folders))
 
 	for _, p := range folders {
+		if p == string(path.PathSeparator) {
+			res = selectors.Any()
+			break
+		}
+
 		// Use path package because it has logic to handle escaping already.
 		res = append(res, path.TrimTrailingSlash(p))
 	}
@@ -8,6 +8,7 @@ import (
 
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/path"
 )
 
 type OneDriveUtilsSuite struct {
@@ -26,6 +27,7 @@ func (suite *OneDriveUtilsSuite) TestIncludeOneDriveRestoreDataSelectors() {
 		containsOnly      = []string{"contains"}
 		prefixOnly        = []string{"/prefix"}
 		containsAndPrefix = []string{"contains", "/prefix"}
+		onlySlash         = []string{string(path.PathSeparator)}
 	)
 
 	table := []struct {
@@ -87,6 +89,15 @@ func (suite *OneDriveUtilsSuite) TestIncludeOneDriveRestoreDataSelectors() {
 			},
 			expectIncludeLen: 2,
 		},
+		{
+			name: "folder with just /",
+			opts: utils.OneDriveOpts{
+				Users:      empty,
+				FileName:   empty,
+				FolderPath: onlySlash,
+			},
+			expectIncludeLen: 1,
+		},
 	}
 	for _, test := range table {
 		suite.Run(test.name, func() {
@@ -9,6 +9,7 @@ import (
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
 
@@ -30,6 +31,7 @@ func (suite *SharePointUtilsSuite) TestIncludeSharePointRestoreDataSelectors() {
 		containsOnly      = []string{"contains"}
 		prefixOnly        = []string{"/prefix"}
 		containsAndPrefix = []string{"contains", "/prefix"}
+		onlySlash         = []string{string(path.PathSeparator)}
 	)
 
 	table := []struct {
@@ -182,6 +184,13 @@ func (suite *SharePointUtilsSuite) TestIncludeSharePointRestoreDataSelectors() {
 			},
 			expectIncludeLen: 2,
 		},
+		{
+			name: "folder with just /",
+			opts: utils.SharePointOpts{
+				FolderPath: onlySlash,
+			},
+			expectIncludeLen: 1,
+		},
 	}
 	for _, test := range table {
 		suite.Run(test.name, func() {
src/cli/utils/testdata/opts.go (14 changes)

@@ -356,6 +356,13 @@ var (
 				FolderPath: selectors.Any(),
 			},
 		},
+		{
+			Name:     "FilesWithSingleSlash",
+			Expected: testdata.OneDriveItems,
+			Opts: utils.OneDriveOpts{
+				FolderPath: []string{"/"},
+			},
+		},
 		{
 			Name:     "FolderPrefixMatch",
 			Expected: testdata.OneDriveItems,
@@ -482,6 +489,13 @@ var (
 				FolderPath: selectors.Any(),
 			},
 		},
+		{
+			Name:     "LibraryItemsWithSingleSlash",
+			Expected: testdata.SharePointLibraryItems,
+			Opts: utils.SharePointOpts{
+				FolderPath: []string{"/"},
+			},
+		},
 		{
 			Name:     "FolderPrefixMatch",
 			Expected: testdata.SharePointLibraryItems,
@@ -420,7 +420,7 @@ func generateAndRestoreOnedriveItems(
 		Service:        service,
 		Tenant:         tenantID,
 		ResourceOwners: []string{resourceOwner},
-		Dest:           tester.DefaultTestRestoreDestination(),
+		Dest:           tester.DefaultTestRestoreDestination(""),
 	}
 
 	_, _, collections, _ := connector.GetCollectionsAndExpected(
@@ -112,7 +112,7 @@ func runDisplayM365JSON(
 	creds account.M365Config,
 	user, itemID string,
 ) error {
-	drive, err := api.GetDriveByID(ctx, srv, user)
+	drive, err := api.GetUsersDrive(ctx, srv, user)
 	if err != nil {
 		return err
 	}
@@ -131,6 +131,12 @@ if (![string]::IsNullOrEmpty($User)) {
     # Works for dev domains where format is <user name>@<domain>.onmicrosoft.com
     $domain = $User.Split('@')[1].Split('.')[0]
     $userNameEscaped = $User.Replace('.', '_').Replace('@', '_')
+
+    # hacky special case because of recreated CI user
+    if ($userNameEscaped -ilike "lynner*") {
+        $userNameEscaped += '1'
+    }
+
     $siteUrl = "https://$domain-my.sharepoint.com/personal/$userNameEscaped/"
 
     if ($LibraryNameList.count -eq 0) {
src/go.mod (16 changes)

@@ -2,13 +2,13 @@ module github.com/alcionai/corso/src
 
 go 1.19
 
-replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230417220734-efdcd8c54f7f
+replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79
 
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
 	github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.44.256
+	github.com/aws/aws-sdk-go v1.44.262
 	github.com/aws/aws-xray-sdk-go v1.8.1
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/google/uuid v1.3.0
@@ -34,7 +34,7 @@ require (
 	go.uber.org/zap v1.24.0
 	golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
 	golang.org/x/time v0.1.0
-	golang.org/x/tools v0.8.0
+	golang.org/x/tools v0.9.1
 	gopkg.in/resty.v1 v1.12.0
 )
 
@@ -79,7 +79,7 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.4 // indirect
+	github.com/klauspost/compress v1.16.5 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/klauspost/reedsolomon v1.11.7 // indirect
@@ -118,11 +118,11 @@ require (
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.8.0 // indirect
 	golang.org/x/mod v0.10.0 // indirect
-	golang.org/x/net v0.9.0 // indirect
-	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/net v0.10.0 // indirect
+	golang.org/x/sync v0.2.0 // indirect
+	golang.org/x/sys v0.8.0 // indirect
 	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.54.0 // indirect
 	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
src/go.sum (34 changes)

@@ -55,8 +55,8 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c h1:Njdw/Nnq2DN3f8QMaHuZZHdVHTUSxFqPMMxDIInDWB4=
 github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c/go.mod h1:DeaMbAwDvYM6ZfPMR/GUl3hceqI5C8jIQ1lstjB2IW8=
-github.com/alcionai/kopia v0.12.2-0.20230417220734-efdcd8c54f7f h1:cD7mcWVTEu83qX6Ml3aqgo8DDv+fBZt/7mQQps2TokM=
-github.com/alcionai/kopia v0.12.2-0.20230417220734-efdcd8c54f7f/go.mod h1:eTgZSDaU2pDzVGC7QRubbKOeohvHzzbRXvhZMH+AGHA=
+github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79 h1:Wrl99Y7jftZMnNDiOIcRJrjstZO3IEj3+Q/sip27vmI=
+github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79/go.mod h1:Iic7CcKhsq+A7MLR9hh6VJfgpcJhLx3Kn+BgjY+azvI=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.256 h1:O8VH+bJqgLDguqkH/xQBFz5o/YheeZqgcOYIgsTVWY4=
-github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.262 h1:gyXpcJptWoNkK+DiAiaBltlreoWKQXjAIh6FRh60F+I=
+github.com/aws/aws-sdk-go v1.44.262/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
 github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -203,7 +203,7 @@ github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE=
 github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk=
 github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
 github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
-github.com/hanwen/go-fuse/v2 v2.2.0 h1:jo5QZYmBLNcl9ovypWaQ5yXMSSV+Ch68xoC3rtZvvBM=
+github.com/hanwen/go-fuse/v2 v2.3.0 h1:t5ivNIH2PK+zw4OBul/iJjsoG9K6kXo4nMDoBpciC8A=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
@@ -234,8 +234,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
-github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
@@ -530,8 +530,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
-golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -552,8 +552,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -604,8 +604,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -673,8 +673,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
-golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -741,8 +741,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd h1:sLpv7bNL1AsX3fdnWh9WVh7ejIzXdOc1RRHGeAmeStU=
-google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -49,6 +49,8 @@ func (gc *GraphConnector) ProduceBackupCollections(
 		diagnostics.Index("service", sels.Service.String()))
 	defer end()
 
+	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
+
 	// Limit the max number of active requests to graph from this collection.
 	ctrlOpts.Parallelism.ItemFetch = graph.Parallelism(sels.PathService()).
 		ItemOverride(ctx, ctrlOpts.Parallelism.ItemFetch)
@@ -194,7 +196,7 @@ func (gc *GraphConnector) ConsumeRestoreCollections(
 	ctx context.Context,
 	backupVersion int,
 	acct account.Account,
-	selector selectors.Selector,
+	sels selectors.Selector,
 	dest control.RestoreDestination,
 	opts control.Options,
 	dcs []data.RestoreCollection,
@@ -203,6 +205,8 @@ func (gc *GraphConnector) ConsumeRestoreCollections(
 	ctx, end := diagnostics.Span(ctx, "connector:restore")
 	defer end()
 
+	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
+
 	var (
 		status *support.ConnectorOperationStatus
 		deets  = &details.Builder{}
@@ -213,7 +217,7 @@ func (gc *GraphConnector) ConsumeRestoreCollections(
 		return nil, clues.Wrap(err, "malformed azure credentials")
 	}
 
-	switch selector.Service {
+	switch sels.Service {
 	case selectors.ServiceExchange:
 		status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets, errs)
 	case selectors.ServiceOneDrive:
@@ -221,7 +225,7 @@ func (gc *GraphConnector) ConsumeRestoreCollections(
 	case selectors.ServiceSharePoint:
 		status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets, errs)
 	default:
-		err = clues.Wrap(clues.New(selector.Service.String()), "service not supported")
+		err = clues.Wrap(clues.New(sels.Service.String()), "service not supported")
 	}
 
 	gc.incrementAwaitingMessages()

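Both backup and restore now bind a per-service rate-limiter config onto the request context before any Graph calls run, so downstream code can pick the right limiter without extra plumbing. The diff doesn't show `graph.BindRateLimiterConfig` itself; the sketch below is only a stand-in for the usual context-value pattern it presumably follows, with `limiterCfg` and the key type invented for illustration.

```go
package main

import (
	"context"
	"fmt"
)

// limiterCfgKey is an unexported key type so context values cannot
// collide with keys set by other packages.
type limiterCfgKey struct{}

// limiterCfg mirrors the rough shape of a per-service limiter config;
// the field is illustrative, not corso's actual definition.
type limiterCfg struct {
	service string
}

// bindLimiterCfg stashes the config on the context, the way
// graph.BindRateLimiterConfig presumably does for downstream callers.
func bindLimiterCfg(ctx context.Context, cfg limiterCfg) context.Context {
	return context.WithValue(ctx, limiterCfgKey{}, cfg)
}

// limiterCfgFrom recovers the config; callers can fall back to a
// default when no config was bound.
func limiterCfgFrom(ctx context.Context) (limiterCfg, bool) {
	cfg, ok := ctx.Value(limiterCfgKey{}).(limiterCfg)
	return cfg, ok
}

func main() {
	ctx := bindLimiterCfg(context.Background(), limiterCfg{service: "exchange"})

	if cfg, ok := limiterCfgFrom(ctx); ok {
		fmt.Println("rate limiting for:", cfg.service)
	}
}
```

Binding the config once at the entry point means every helper that already receives the context inherits the right limiter for free, which is why both `ProduceBackupCollections` and `ConsumeRestoreCollections` do it immediately after opening their diagnostics span.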
@@ -20,7 +20,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
-	"github.com/alcionai/corso/src/pkg/selectors/testdata"
+	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
 )
 
 // ---------------------------------------------------------------------------
@@ -160,7 +160,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
 			name: "Invalid onedrive backup user",
 			getSelector: func(t *testing.T) selectors.Selector {
 				sel := selectors.NewOneDriveBackup(owners)
-				sel.Include(sel.Folders(selectors.Any()))
+				sel.Include(selTD.OneDriveBackupFolderScope(sel))
 				return sel.Selector
 			},
 		},
@@ -168,7 +168,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
 			name: "Invalid sharepoint backup site",
 			getSelector: func(t *testing.T) selectors.Selector {
 				sel := selectors.NewSharePointBackup(owners)
-				sel.Include(testdata.SharePointBackupFolderScope(sel))
+				sel.Include(selTD.SharePointBackupFolderScope(sel))
 				return sel.Selector
 			},
 		},
@@ -185,7 +185,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
 			name: "missing onedrive backup user",
 			getSelector: func(t *testing.T) selectors.Selector {
 				sel := selectors.NewOneDriveBackup(owners)
-				sel.Include(sel.Folders(selectors.Any()))
+				sel.Include(selTD.OneDriveBackupFolderScope(sel))
 				sel.DiscreteOwner = ""
 				return sel.Selector
 			},
@@ -194,7 +194,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
 			name: "missing sharepoint backup site",
 			getSelector: func(t *testing.T) selectors.Selector {
 				sel := selectors.NewSharePointBackup(owners)
-				sel.Include(testdata.SharePointBackupFolderScope(sel))
+				sel.Include(selTD.SharePointBackupFolderScope(sel))
 				sel.DiscreteOwner = ""
 				return sel.Selector
 			},
@@ -239,7 +239,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
 			name: "Libraries",
 			getSelector: func() selectors.Selector {
 				sel := selectors.NewSharePointBackup(selSites)
-				sel.Include(testdata.SharePointBackupFolderScope(sel))
+				sel.Include(selTD.SharePointBackupFolderScope(sel))
 				return sel.Selector
 			},
 		},

@@ -134,6 +134,10 @@ func (c Mail) GetItem(
 	immutableIDs bool,
 	errs *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
+	var (
+		size     int64
+		mailBody models.ItemBodyable
+	)
 	// Will need adjusting if attachments start allowing paging.
 	headers := buildPreferHeaders(false, immutableIDs)
 	itemOpts := &users.ItemMessagesMessageItemRequestBuilderGetRequestConfiguration{
@@ -145,8 +149,16 @@ func (c Mail) GetItem(
 		return nil, nil, graph.Stack(ctx, err)
 	}
 
-	if !ptr.Val(mail.GetHasAttachments()) && !HasAttachments(mail.GetBody()) {
-		return mail, MailInfo(mail), nil
+	mailBody = mail.GetBody()
+	if mailBody != nil {
+		content := ptr.Val(mailBody.GetContent())
+		if len(content) > 0 {
+			size = int64(len(content))
+		}
+	}
+
+	if !ptr.Val(mail.GetHasAttachments()) && !HasAttachments(mailBody) {
+		return mail, MailInfo(mail, size), nil
 	}
 
 	options := &users.ItemMessagesItemAttachmentsRequestBuilderGetRequestConfiguration{
@@ -163,8 +175,14 @@ func (c Mail) GetItem(
 		Attachments().
 		Get(ctx, options)
 	if err == nil {
+		for _, a := range attached.GetValue() {
+			attachSize := ptr.Val(a.GetSize())
+			size += int64(attachSize)
+		}
+
 		mail.SetAttachments(attached.GetValue())
-		return mail, MailInfo(mail), nil
+
+		return mail, MailInfo(mail, size), nil
 	}
 
 	// A failure can be caused by having a lot of attachments as
@@ -214,11 +232,13 @@ func (c Mail) GetItem(
 		}
 
 		atts = append(atts, att)
+		attachSize := ptr.Val(a.GetSize())
+		size += int64(attachSize)
 	}
 
 	mail.SetAttachments(atts)
 
-	return mail, MailInfo(mail), nil
+	return mail, MailInfo(mail, size), nil
 }
 
 // EnumerateContainers iterates through all of the users current
@@ -419,7 +439,7 @@ func (c Mail) Serialize(
 // Helpers
 // ---------------------------------------------------------------------------
 
-func MailInfo(msg models.Messageable) *details.ExchangeInfo {
+func MailInfo(msg models.Messageable, size int64) *details.ExchangeInfo {
 	var (
 		sender  = UnwrapEmailAddress(msg.GetSender())
 		subject = ptr.Val(msg.GetSubject())
@@ -444,6 +464,7 @@ func MailInfo(msg models.Messageable) *details.ExchangeInfo {
 		Recipient: recipients,
 		Subject:   subject,
 		Received:  received,
+		Size:      size,
 		Created:   created,
 		Modified:  ptr.OrNow(msg.GetLastModifiedDateTime()),
 	}

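The `GetItem` changes compute a mail item's size up front — body content length plus the reported size of every attachment — and pass that total into `MailInfo` instead of leaving the field unset. Note the accumulation must use `+=`; an assignment like `size = +int64(n)` would silently overwrite earlier contributions. A minimal sketch of the same accumulation, with `message` and `attachment` as invented stand-ins for the Graph SDK models:

```go
package main

import "fmt"

// attachment and message model only the fields the size
// computation needs; they are not corso's actual types.
type attachment struct{ size int32 }

type message struct {
	body        string
	attachments []attachment
}

// mailSize mirrors the accumulation in Mail.GetItem: body content
// length plus the size of each attachment.
func mailSize(m message) int64 {
	size := int64(len(m.body))
	for _, a := range m.attachments {
		size += int64(a.size) // += accumulates; = + would overwrite
	}
	return size
}

func main() {
	m := message{
		body:        "hello world",
		attachments: []attachment{{size: 50}, {size: 150}},
	}
	fmt.Println(mailSize(m)) // 11 + 50 + 150 = 211
}
```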
@@ -152,7 +152,7 @@ func (suite *MailAPIUnitSuite) TestMailInfo() {
 	for _, tt := range tests {
 		suite.Run(tt.name, func() {
 			msg, expected := tt.msgAndRP()
-			assert.Equal(suite.T(), expected, api.MailInfo(msg))
+			assert.Equal(suite.T(), expected, api.MailInfo(msg, 0))
 		})
 	}
 }
@@ -213,6 +213,7 @@ func (suite *MailAPIIntgSuite) TestHugeAttachmentListDownload() {
 		name            string
 		setupf          func()
 		attachmentCount int
+		size            int64
 		expect          assert.ErrorAssertionFunc
 	}{
 		{
@@ -242,6 +243,9 @@ func (suite *MailAPIIntgSuite) TestHugeAttachmentListDownload() {
 
 				atts := models.NewAttachmentCollectionResponse()
 				aitem := models.NewAttachment()
+
+				asize := int32(50)
+				aitem.SetSize(&asize)
 				atts.SetValue([]models.Attachmentable{aitem})
 
 				gock.New("https://graph.microsoft.com").
@@ -250,6 +254,7 @@ func (suite *MailAPIIntgSuite) TestHugeAttachmentListDownload() {
 					JSON(getJSONObject(suite.T(), atts))
 			},
 			attachmentCount: 1,
+			size:            50,
 			expect:          assert.NoError,
 		},
 		{
@@ -289,6 +294,7 @@ func (suite *MailAPIIntgSuite) TestHugeAttachmentListDownload() {
 					JSON(getJSONObject(suite.T(), aitem))
 			},
 			attachmentCount: 1,
+			size:            200,
 			expect:          assert.NoError,
 		},
 		{
@@ -330,6 +336,7 @@ func (suite *MailAPIIntgSuite) TestHugeAttachmentListDownload() {
 				}
 			},
 			attachmentCount: 5,
+			size:            200,
 			expect:          assert.NoError,
 		},
 	}
@@ -348,8 +355,23 @@ func (suite *MailAPIIntgSuite) TestHugeAttachmentListDownload() {
 			it, ok := item.(models.Messageable)
 			require.True(suite.T(), ok, "convert to messageable")
+
+			var size int64
+			mailBody := it.GetBody()
+			if mailBody != nil {
+				content := ptr.Val(mailBody.GetContent())
+				if len(content) > 0 {
+					size = int64(len(content))
+				}
+			}
+
+			attachments := it.GetAttachments()
+			for _, attachment := range attachments {
+				size += int64(*attachment.GetSize())
+			}
+
 			assert.Equal(suite.T(), *it.GetId(), mid)
-			assert.Equal(suite.T(), tt.attachmentCount, len(it.GetAttachments()), "attachment count")
+			assert.Equal(suite.T(), tt.attachmentCount, len(attachments), "attachment count")
+			assert.Equal(suite.T(), tt.size, size, "mail size")
 			assert.True(suite.T(), gock.IsDone(), "made all requests")
 		})
 	}

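These tests drive `GetItem` against stubbed Graph responses via gock, registering canned attachment payloads (now carrying a `size` field) and finishing with `gock.IsDone()` to assert every registered mock was consumed. A self-contained sketch of that flow; the URL path and payload here are illustrative, not the exact routes the suite registers:

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/h2non/gock"
)

func main() {
	defer gock.Off() // restore the normal transport when done

	// Register one Graph-style endpoint with a canned JSON body.
	gock.New("https://graph.microsoft.com").
		Get("/v1.0/users/u/messages/m/attachments").
		Reply(200).
		JSON(map[string]interface{}{
			"value": []map[string]interface{}{{"size": 50}},
		})

	// gock intercepts http.DefaultTransport, so a plain GET hits the mock.
	resp, err := http.Get("https://graph.microsoft.com/v1.0/users/u/messages/m/attachments")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))

	// Mirrors the suite's final check: every mock was actually requested.
	fmt.Println("all mocks consumed:", gock.IsDone())
}
```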
@@ -549,7 +549,7 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() {
 	var (
 		user            = tester.M365UserID(suite.T())
 		directoryCaches = make(map[path.CategoryType]graph.ContainerResolver)
-		folderName      = tester.DefaultTestRestoreDestination().ContainerName
+		folderName      = tester.DefaultTestRestoreDestination("").ContainerName
 		tests           = []struct {
 			name      string
 			pathFunc1 func(t *testing.T) path.Path

@@ -282,9 +282,18 @@ func (suite *DataCollectionsIntegrationSuite) TestMailFetch() {
 			}
 
 			require.NotEmpty(t, c.FullPath().Folder(false))
-			folder := c.FullPath().Folder(false)
 
-			delete(test.folderNames, folder)
+			// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
+			// interface.
+			if !assert.Implements(t, (*data.LocationPather)(nil), c) {
+				continue
+			}
+
+			loc := c.(data.LocationPather).LocationPath().String()
+
+			require.NotEmpty(t, loc)
+
+			delete(test.folderNames, loc)
 		}
 
 		assert.Empty(t, test.folderNames)
@@ -525,7 +534,16 @@ func (suite *DataCollectionsIntegrationSuite) TestContactSerializationRegression
 			continue
 		}
 
-		assert.Equal(t, edc.FullPath().Folder(false), DefaultContactFolder)
+		// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
+		// interface.
+		if !assert.Implements(t, (*data.LocationPather)(nil), edc) {
+			continue
+		}
+
+		assert.Equal(
+			t,
+			edc.(data.LocationPather).LocationPath().String(),
+			DefaultContactFolder)
 		assert.NotZero(t, count)
 	}
 

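Both test hunks stop comparing `FullPath().Folder(false)` (the ID-based storage path) and instead check the collection's human-readable location, guarded by an interface assertion so collections that don't yet implement `data.LocationPather` are skipped rather than failed. The guard reduces to a plain type assertion; in this sketch `LocationPath` returns a string, whereas corso's interface presumably returns a path builder:

```go
package main

import "fmt"

// locationPather mirrors the role of data.LocationPather: collections
// that can report a display-name folder path implement it. The string
// return type is a simplification for this sketch.
type locationPather interface {
	LocationPath() string
}

type collection struct{ loc string }

func (c collection) LocationPath() string { return c.loc }

func main() {
	var c interface{} = collection{loc: "Inbox/Reports"}

	// assert.Implements in the tests boils down to this type
	// assertion: only conforming collections are checked further.
	if lp, ok := c.(locationPather); ok {
		fmt.Println(lp.LocationPath())
	}
}
```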
@@ -260,7 +260,12 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 				return
 			}
 
-			info.Size = int64(len(data))
+			// For mail, size is computed as body content size plus attachment sizes;
+			// in all other cases it is the item's total serialized size.
+			if info.Size <= 0 {
+				info.Size = int64(len(data))
+			}
 
 			info.ParentPath = col.locationPath.String()
 
 			col.data <- &Stream{

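`streamItems` now treats the serialized byte count as a fallback only: mail items arrive with `Size` already filled in (body plus attachments), while every other category keeps the old behavior. A small sketch of that guard, with `itemInfo` standing in for the real details struct:

```go
package main

import "fmt"

type itemInfo struct {
	size int64
}

// setFallbackSize mirrors the streamItems guard: only use the
// serialized-byte count when nothing upstream computed a size.
func setFallbackSize(info *itemInfo, serialized []byte) {
	if info.size <= 0 {
		info.size = int64(len(serialized))
	}
}

func main() {
	mail := itemInfo{size: 211} // precomputed by GetItem for mail
	other := itemInfo{}         // contacts, events, etc.

	data := make([]byte, 1024)
	setFallbackSize(&mail, data)
	setFallbackSize(&other, data)

	fmt.Println(mail.size, other.size) // 211 1024
}
```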
@@ -3,14 +3,12 @@ package exchange
 import (
 	"context"
 	"testing"
-	"time"
 
 	"github.com/alcionai/clues"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
-	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/connector/exchange/api"
 	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
@@ -67,8 +65,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreContact() {
 	var (
 		t          = suite.T()
 		userID     = tester.M365UserID(t)
-		now        = time.Now()
-		folderName = "TestRestoreContact: " + dttm.FormatTo(now, dttm.SafeForTesting)
+		folderName = tester.DefaultTestRestoreDestination("contact").ContainerName
 	)
 
 	aFolder, err := suite.ac.Contacts().CreateContactFolder(ctx, userID, folderName)
@@ -102,7 +99,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreEvent() {
 	var (
 		t       = suite.T()
 		userID  = tester.M365UserID(t)
-		subject = "TestRestoreEvent: " + dttm.FormatNow(dttm.SafeForTesting)
+		subject = tester.DefaultTestRestoreDestination("event").ContainerName
 	)
 
 	calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, subject)
@@ -172,7 +169,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 	}
 
 	userID := tester.M365UserID(suite.T())
-	now := time.Now()
 	tests := []struct {
 		name     string
 		bytes    []byte
@@ -184,7 +181,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageBytes("Restore Exchange Object"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreMailObject: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailobj").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -196,7 +193,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageWithDirectAttachment("Restore 1 Attachment"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreMailwithAttachment: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailwattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -208,7 +205,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageWithItemAttachmentEvent("Event Item Attachment"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreEventItemAttachment: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("eventwattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -220,7 +217,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageWithItemAttachmentMail("Mail Item Attachment"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreMailItemAttachment: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailitemattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -235,7 +232,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreMailBasicItemAttachment: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailbasicattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -250,7 +247,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "ItemMailAttachmentwAttachment " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailnestattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -265,7 +262,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "ItemMailAttachment_Contact " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailcontactattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -277,7 +274,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageWithNestedItemAttachmentEvent("Nested Item Attachment"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreNestedEventItemAttachment: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("nestedattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -289,7 +286,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageWithLargeAttachment("Restore Large Attachment"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreMailwithLargeAttachment: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("maillargeattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -301,7 +298,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageWithTwoAttachments("Restore 2 Attachments"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreMailwithAttachments: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailtwoattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -313,7 +310,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.MessageWithOneDriveAttachment("Restore Reference(OneDrive) Attachment"),
 			category: path.EmailCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreMailwithReferenceAttachment: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("mailrefattch").ContainerName
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -326,7 +323,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.ContactBytes("Test_Omega"),
 			category: path.ContactsCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				folderName := "TestRestoreContactObject: " + dttm.FormatTo(now, dttm.SafeForTesting)
+				folderName := tester.DefaultTestRestoreDestination("contact").ContainerName
 				folder, err := suite.ac.Contacts().CreateContactFolder(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
@@ -338,8 +335,8 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.EventBytes("Restored Event Object"),
 			category: path.EventsCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				calendarName := "TestRestoreEventObject: " + dttm.FormatTo(now, dttm.SafeForTesting)
-				calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, calendarName)
+				folderName := tester.DefaultTestRestoreDestination("event").ContainerName
+				calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
 				return ptr.Val(calendar.GetId())
@@ -350,8 +347,8 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			bytes:    exchMock.EventWithAttachment("Restored Event Attachment"),
 			category: path.EventsCategory,
 			destination: func(t *testing.T, ctx context.Context) string {
-				calendarName := "TestRestoreEventObject_" + dttm.FormatTo(now, dttm.SafeForTesting)
-				calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, calendarName)
+				folderName := tester.DefaultTestRestoreDestination("eventobj").ContainerName
+				calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, folderName)
 				require.NoError(t, err, clues.ToCore(err))
 
 				return ptr.Val(calendar.GetId())

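Every folder name in this file now comes from `tester.DefaultTestRestoreDestination(infix)`, replacing the hand-built `"TestRestoreX: " + dttm.FormatTo(now, ...)` strings; that also lets the `time` and `dttm` imports disappear. The helper's exact format isn't shown in the diff, so the sketch below only assumes a timestamped name with an optional infix, which is all that's needed to keep concurrent test runs from colliding:

```go
package main

import (
	"fmt"
	"time"
)

// defaultTestRestoreDestination imitates the tester helper the diff
// switches to. The prefix and timestamp layout are assumptions for
// illustration, not corso's actual format.
func defaultTestRestoreDestination(infix string) string {
	name := "Corso_Restore_"
	if infix != "" {
		name += infix + "_"
	}
	return name + time.Now().UTC().Format("02-Jan-2006_15:04:05")
}

func main() {
	fmt.Println(defaultTestRestoreDestination(""))        // suite-level default
	fmt.Println(defaultTestRestoreDestination("contact")) // per-test variant
}
```

Centralizing the naming in one helper means a format change (or a collision fix) happens in one place instead of a dozen test files.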
@@ -137,21 +137,15 @@ func includeContainer(
 		directory = locPath.Folder(false)
 	}
 
-	var (
-		ok      bool
-		pathRes path.Path
-	)
+	var ok bool
 
 	switch category {
 	case path.EmailCategory:
 		ok = scope.Matches(selectors.ExchangeMailFolder, directory)
-		pathRes = locPath
 	case path.ContactsCategory:
 		ok = scope.Matches(selectors.ExchangeContactFolder, directory)
-		pathRes = locPath
 	case path.EventsCategory:
 		ok = scope.Matches(selectors.ExchangeEventCalendar, directory)
-		pathRes = dirPath
 	default:
 		return nil, nil, false
 	}
@@ -162,5 +156,5 @@ func includeContainer(
 		"matches_input", directory,
 	).Debug("backup folder selection filter")
 
-	return pathRes, loc, ok
+	return dirPath, loc, ok
 }

@@ -56,10 +56,6 @@ func filterContainersAndFillCollections(
 		// deleted from this map, leaving only the deleted folders behind
 		tombstones = makeTombstones(dps)
 		category   = qp.Category
-
-		// Stop-gap: Track folders by LocationPath and if there's duplicates pick
-		// the one with the lexicographically larger ID.
-		dupPaths = map[string]string{}
 	)
 
 	logger.Ctx(ctx).Infow("filling collections", "len_deltapaths", len(dps))
@@ -108,53 +104,6 @@ func filterContainersAndFillCollections(
 			continue
 		}
-
-		// This is a duplicate collection. Either the collection we're examining now
-		// should be skipped or the collection we previously added should be
-		// skipped.
-		//
-		// Calendars is already using folder IDs so we don't need to pick the
-		// "newest" folder for that.
-		if oldCID := dupPaths[locPath.String()]; category != path.EventsCategory && len(oldCID) > 0 {
-			if cID < oldCID {
-				logger.Ctx(ictx).Infow(
-					"skipping duplicate folder with lesser ID",
-					"previous_folder_id", clues.Hide(oldCID),
-					"current_folder_id", clues.Hide(cID),
-					"duplicate_path", locPath)
-
-				// Readd this entry to the tombstone map because we remove it first off.
-				if oldDP, ok := dps[cID]; ok {
-					tombstones[cID] = oldDP.path
-				}
-
-				// Continuing here ensures we don't add anything to the paths map or the
-				// delta map which is the behavior we want.
-				continue
-			}
-
-			logger.Ctx(ictx).Infow(
-				"switching duplicate folders as newer folder found",
-				"previous_folder_id", clues.Hide(oldCID),
-				"current_folder_id", clues.Hide(cID),
-				"duplicate_path", locPath)
-
-			// Remove the previous collection from the maps. This will make us think
-			// it's a new item and properly populate it if it ever:
-			// * moves
-			// * replaces the current entry (current entry moves/is deleted)
-			delete(collections, oldCID)
-			delete(deltaURLs, oldCID)
-			delete(currPaths, oldCID)
-
-			// Re-add the tombstone entry for the old folder so that it can be marked
-			// as deleted if need.
-			if oldDP, ok := dps[oldCID]; ok {
-				tombstones[oldCID] = oldDP.path
-			}
-		}
-
-		dupPaths[locPath.String()] = cID
 
 		if len(prevPathStr) > 0 {
 			if prevPath, err = pathFromPrevString(prevPathStr); err != nil {
 				logger.CtxErr(ictx, err).Error("parsing prev path")

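The deleted stop-gap existed because two live folders can share one display location, and a location-keyed map forces one of them to win (here, the lexicographically larger ID). Once collections are keyed by container ID — as the `idPath` changes elsewhere in this commit do — the collision disappears and the whole block becomes dead weight. A toy illustration of why the ID key makes the dedup logic unnecessary:

```go
package main

import "fmt"

// container stands in for a Graph mail/contacts folder: a stable ID
// plus a human-readable location that need not be unique.
type container struct{ id, location string }

func main() {
	cs := []container{
		{id: "1", location: "foo/bar"},
		{id: "2", location: "foo/bar"}, // same display path, different folder
	}

	byLocation := map[string]container{}
	byID := map[string]container{}

	for _, c := range cs {
		byLocation[c.location] = c // collides: "2" silently replaces "1"
		byID[c.id] = c             // no collision possible
	}

	fmt.Println(len(byLocation), len(byID)) // 1 2
}
```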
@@ -384,6 +384,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 			ResourceOwner: inMock.NewProvider("user_id", "user_name"),
 			Credentials:   suite.creds,
 		}
 
 		statusUpdater = func(*support.ConnectorOperationStatus) {}
+
 		dataTypes = []scopeCat{
@@ -395,6 +396,10 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 				scope: selectors.NewExchangeBackup(nil).ContactFolders(selectors.Any())[0],
 				cat:   path.ContactsCategory,
 			},
+			{
+				scope: selectors.NewExchangeBackup(nil).EventCalendars(selectors.Any())[0],
+				cat:   path.EventsCategory,
+			},
 		}
 
 		location = path.Builder{}.Append("foo", "bar")
@@ -448,8 +453,20 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 		return res
 	}
 
-	locPath := func(t *testing.T, cat path.CategoryType) path.Path {
-		res, err := location.ToDataLayerPath(
+	idPath1 := func(t *testing.T, cat path.CategoryType) path.Path {
+		res, err := path.Builder{}.Append("1").ToDataLayerPath(
+			suite.creds.AzureTenantID,
+			qp.ResourceOwner.ID(),
+			path.ExchangeService,
+			cat,
+			false)
+		require.NoError(t, err, clues.ToCore(err))
+
+		return res
+	}
+
+	idPath2 := func(t *testing.T, cat path.CategoryType) path.Path {
+		res, err := path.Builder{}.Append("2").ToDataLayerPath(
 			suite.creds.AzureTenantID,
 			qp.ResourceOwner.ID(),
 			path.ExchangeService,
@@ -467,8 +484,6 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 		inputMetadata  func(t *testing.T, cat path.CategoryType) DeltaPaths
 		expectNewColls int
 		expectDeleted  int
-		expectAdded    []string
-		expectRemoved  []string
 		expectMetadata func(t *testing.T, cat path.CategoryType) DeltaPaths
 	}{
 		{
@@ -486,49 +501,19 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 					},
 					"2": DeltaPath{
 						delta: "old_delta",
-						path:  locPath(t, cat).String(),
+						path:  idPath2(t, cat).String(),
 					},
 				}
 			},
-			expectDeleted: 1,
-			expectAdded:   result2.added,
-			expectRemoved: result2.removed,
 			expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths {
-				return DeltaPaths{
-					"2": DeltaPath{
-						delta: "delta_url2",
-						path:  locPath(t, cat).String(),
-					},
-				}
-			},
-		},
-		{
-			name: "1 moved to duplicate, other order",
-			getter: map[string]mockGetterResults{
-				"1": result1,
-				"2": result2,
-			},
-			resolver: newMockResolver(container2, container1),
-			inputMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths {
 				return DeltaPaths{
 					"1": DeltaPath{
-						delta: "old_delta",
-						path:  oldPath1(t, cat).String(),
+						delta: "delta_url",
+						path:  idPath1(t, cat).String(),
 					},
-					"2": DeltaPath{
-						delta: "old_delta",
-						path:  locPath(t, cat).String(),
-					},
-				}
-			},
-			expectDeleted: 1,
-			expectAdded:   result2.added,
-			expectRemoved: result2.removed,
-			expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths {
-				return DeltaPaths{
 					"2": DeltaPath{
 						delta: "delta_url2",
-						path:  locPath(t, cat).String(),
+						path:  idPath2(t, cat).String(),
 					},
 				}
 			},
@@ -552,14 +537,15 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 					},
 				}
 			},
-			expectDeleted: 1,
-			expectAdded:   result2.added,
-			expectRemoved: result2.removed,
 			expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths {
 				return DeltaPaths{
+					"1": DeltaPath{
+						delta: "delta_url",
+						path:  idPath1(t, cat).String(),
+					},
 					"2": DeltaPath{
 						delta: "delta_url2",
-						path:  locPath(t, cat).String(),
+						path:  idPath2(t, cat).String(),
 					},
 				}
 			},
@@ -574,14 +560,16 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 			inputMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths {
 				return DeltaPaths{}
 			},
-			expectNewColls: 1,
-			expectAdded:    result2.added,
-			expectRemoved:  result2.removed,
+			expectNewColls: 2,
 			expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths {
 				return DeltaPaths{
+					"1": DeltaPath{
+						delta: "delta_url",
+						path:  idPath1(t, cat).String(),
+					},
 					"2": DeltaPath{
 						delta: "delta_url2",
-						path:  locPath(t, cat).String(),
+						path:  idPath2(t, cat).String(),
 					},
 				}
 			},
@@ -596,19 +584,17 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 				return DeltaPaths{
 					"2": DeltaPath{
 						delta: "old_delta",
-						path:  locPath(t, cat).String(),
+						path:  idPath2(t, cat).String(),
 					},
 				}
 			},
 			expectNewColls: 1,
 			expectDeleted:  1,
-			expectAdded:    result1.added,
-			expectRemoved:  result1.removed,
 			expectMetadata: func(t *testing.T, cat path.CategoryType) DeltaPaths {
 				return DeltaPaths{
 					"1": DeltaPath{
 						delta: "delta_url",
-						path:  locPath(t, cat).String(),
+						path:  idPath1(t, cat).String(),
 					},
 				}
 			},
@@ -633,7 +619,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 				statusUpdater,
 				test.resolver,
 				sc.scope,
-				test.inputMetadata(t, sc.cat),
+				test.inputMetadata(t, qp.Category),
 				control.Options{FailureHandling: control.FailFast},
 				fault.New(true))
 			require.NoError(t, err, "getting collections", clues.ToCore(err))
@@ -649,21 +635,30 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 
 				if c.FullPath().Service() == path.ExchangeMetadataService {
 					metadatas++
-					checkMetadata(t, ctx, sc.cat, test.expectMetadata(t, sc.cat), c)
+					checkMetadata(t, ctx, qp.Category, test.expectMetadata(t, qp.Category), c)
 					continue
 				}
 
 				if c.State() == data.NewState {
 					news++
 				}
-
-				exColl, ok := c.(*Collection)
-				require.True(t, ok, "collection is an *exchange.Collection")
-
-				if exColl.LocationPath() != nil {
-					assert.Equal(t, location.String(), exColl.LocationPath().String())
+			}
+
+			assert.Equal(t, test.expectDeleted, deleteds, "deleted collections")
+			assert.Equal(t, test.expectNewColls, news, "new collections")
+			assert.Equal(t, 1, metadatas, "metadata collections")
+
+			// items in collections assertions
+			for k, expect := range test.getter {
+				coll := collections[k]
+
+				if coll == nil {
+					continue
 				}
+
+				exColl, ok := coll.(*Collection)
+				require.True(t, ok, "collection is an *exchange.Collection")
+
 				ids := [][]string{
 					make([]string, 0, len(exColl.added)),
 					make([]string, 0, len(exColl.removed)),
@@ -675,268 +670,15 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
 					}
 				}
 
-				assert.ElementsMatch(t, test.expectAdded, ids[0], "added items")
-				assert.ElementsMatch(t, test.expectRemoved, ids[1], "removed items")
+				assert.ElementsMatch(t, expect.added, ids[0], "added items")
+				assert.ElementsMatch(t, expect.removed, ids[1], "removed items")
 			}
 
-			assert.Equal(t, test.expectDeleted, deleteds, "deleted collections")
-			assert.Equal(t, test.expectNewColls, news, "new collections")
-			assert.Equal(t, 1, metadatas, "metadata collections")
 		})
 	}
 	})
 }
 }
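The reworked assertions walk `test.getter` and compare each collection's added/removed item IDs against expectations, instead of carrying per-test `expectAdded`/`expectRemoved` slices. Since `exchange.Collection` stores those IDs as `map[string]struct{}` sets (visible in the removed code below), the test first flattens the keys into slices for `ElementsMatch`; the same flattening in isolation:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// exchange.Collection tracks added/removed item IDs as sets; the
	// test flattens them to slices before comparing with ElementsMatch.
	added := map[string]struct{}{"a1": {}, "a2": {}, "a3": {}}

	ids := make([]string, 0, len(added))
	for id := range added {
		ids = append(ids, id)
	}

	sort.Strings(ids) // map iteration order is random; sort for a stable view
	fmt.Println(ids)  // [a1 a2 a3]
}
```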
|
|
||||||
func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_DuplicateFolders_Events() {
|
|
||||||
var (
|
|
||||||
qp = graph.QueryParams{
|
|
||||||
ResourceOwner: inMock.NewProvider("user_id", "user_name"),
|
|
||||||
Category: path.EventsCategory,
|
|
||||||
Credentials: suite.creds,
|
|
||||||
}
|
|
||||||
statusUpdater = func(*support.ConnectorOperationStatus) {}
|
|
||||||
|
|
||||||
scope = selectors.NewExchangeBackup(nil).EventCalendars(selectors.Any())[0]
|
|
||||||
|
|
||||||
location = path.Builder{}.Append("foo", "bar")
|
|
||||||
|
|
||||||
result1 = mockGetterResults{
|
|
||||||
added: []string{"a1", "a2", "a3"},
|
|
||||||
removed: []string{"r1", "r2", "r3"},
|
|
||||||
newDelta: api.DeltaUpdate{URL: "delta_url"},
|
|
||||||
}
|
|
||||||
result2 = mockGetterResults{
|
|
||||||
added: []string{"a4", "a5", "a6"},
|
|
||||||
removed: []string{"r4", "r5", "r6"},
|
|
||||||
newDelta: api.DeltaUpdate{URL: "delta_url2"},
|
|
||||||
}
|
|
||||||
|
|
||||||
container1 = mockContainer{
|
|
||||||
id: strPtr("1"),
|
|
||||||
displayName: strPtr("bar"),
|
|
||||||
p: path.Builder{}.Append("1"),
|
|
||||||
l: location,
|
|
||||||
}
|
|
||||||
container2 = mockContainer{
|
|
||||||
id: strPtr("2"),
|
|
||||||
displayName: strPtr("bar"),
|
|
||||||
p: path.Builder{}.Append("2"),
|
|
||||||
l: location,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
oldPath1, err := location.Append("1").ToDataLayerPath(
|
|
||||||
suite.creds.AzureTenantID,
|
|
||||||
qp.ResourceOwner.ID(),
|
|
||||||
path.ExchangeService,
|
|
||||||
qp.Category,
|
|
||||||
false)
|
|
||||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
|
||||||
|
|
||||||
oldPath2, err := location.Append("2").ToDataLayerPath(
|
|
||||||
suite.creds.AzureTenantID,
|
|
||||||
qp.ResourceOwner.ID(),
|
|
||||||
path.ExchangeService,
|
|
||||||
qp.Category,
|
|
||||||
false)
|
|
||||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
|
||||||
|
|
||||||
idPath1, err := path.Builder{}.Append("1").ToDataLayerPath(
|
|
||||||
suite.creds.AzureTenantID,
|
|
||||||
qp.ResourceOwner.ID(),
|
|
||||||
path.ExchangeService,
|
|
||||||
qp.Category,
|
|
||||||
false)
|
|
||||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
|
||||||
|
|
||||||
idPath2, err := path.Builder{}.Append("2").ToDataLayerPath(
|
|
||||||
suite.creds.AzureTenantID,
|
|
||||||
qp.ResourceOwner.ID(),
|
|
||||||
path.ExchangeService,
|
|
||||||
qp.Category,
|
|
||||||
false)
|
|
||||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
|
||||||
|
|
||||||
table := []struct {
|
|
||||||
name string
|
|
||||||
getter mockGetter
|
|
||||||
resolver graph.ContainerResolver
|
|
||||||
inputMetadata DeltaPaths
|
|
||||||
expectNewColls int
|
|
||||||
expectDeleted int
|
|
||||||
expectMetadata DeltaPaths
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "1 moved to duplicate",
|
|
||||||
getter: map[string]mockGetterResults{
|
|
||||||
"1": result1,
|
|
||||||
"2": result2,
|
|
||||||
},
|
|
||||||
resolver: newMockResolver(container1, container2),
|
|
||||||
inputMetadata: DeltaPaths{
|
|
||||||
"1": DeltaPath{
|
|
||||||
delta: "old_delta",
|
|
||||||
path: oldPath1.String(),
|
|
||||||
},
|
|
||||||
"2": DeltaPath{
|
|
||||||
delta: "old_delta",
|
|
||||||
path: idPath2.String(),
|
|
                },
            },
            expectMetadata: DeltaPaths{
                "1": DeltaPath{
                    delta: "delta_url",
                    path:  idPath1.String(),
                },
                "2": DeltaPath{
                    delta: "delta_url2",
                    path:  idPath2.String(),
                },
            },
        },
        {
            name: "both move to duplicate",
            getter: map[string]mockGetterResults{
                "1": result1,
                "2": result2,
            },
            resolver: newMockResolver(container1, container2),
            inputMetadata: DeltaPaths{
                "1": DeltaPath{
                    delta: "old_delta",
                    path:  oldPath1.String(),
                },
                "2": DeltaPath{
                    delta: "old_delta",
                    path:  oldPath2.String(),
                },
            },
            expectMetadata: DeltaPaths{
                "1": DeltaPath{
                    delta: "delta_url",
                    path:  idPath1.String(),
                },
                "2": DeltaPath{
                    delta: "delta_url2",
                    path:  idPath2.String(),
                },
            },
        },
        {
            name: "both new",
            getter: map[string]mockGetterResults{
                "1": result1,
                "2": result2,
            },
            resolver:       newMockResolver(container1, container2),
            inputMetadata:  DeltaPaths{},
            expectNewColls: 2,
            expectMetadata: DeltaPaths{
                "1": DeltaPath{
                    delta: "delta_url",
                    path:  idPath1.String(),
                },
                "2": DeltaPath{
                    delta: "delta_url2",
                    path:  idPath2.String(),
                },
            },
        },
        {
            name: "add 1 remove 2",
            getter: map[string]mockGetterResults{
                "1": result1,
            },
            resolver: newMockResolver(container1),
            inputMetadata: DeltaPaths{
                "2": DeltaPath{
                    delta: "old_delta",
                    path:  idPath2.String(),
                },
            },
            expectNewColls: 1,
            expectDeleted:  1,
            expectMetadata: DeltaPaths{
                "1": DeltaPath{
                    delta: "delta_url",
                    path:  idPath1.String(),
                },
            },
        },
    }

    for _, test := range table {
        suite.Run(test.name, func() {
            t := suite.T()

            ctx, flush := tester.NewContext()
            defer flush()

            collections, err := filterContainersAndFillCollections(
                ctx,
                qp,
                test.getter,
                statusUpdater,
                test.resolver,
                scope,
                test.inputMetadata,
                control.Options{FailureHandling: control.FailFast},
                fault.New(true))
            require.NoError(t, err, "getting collections", clues.ToCore(err))

            // collection assertions

            deleteds, news, metadatas := 0, 0, 0
            for _, c := range collections {
                if c.State() == data.DeletedState {
                    deleteds++
                    continue
                }

                if c.FullPath().Service() == path.ExchangeMetadataService {
                    metadatas++
                    checkMetadata(t, ctx, qp.Category, test.expectMetadata, c)
                    continue
                }

                if c.State() == data.NewState {
                    news++
                }
            }

            assert.Equal(t, test.expectDeleted, deleteds, "deleted collections")
            assert.Equal(t, test.expectNewColls, news, "new collections")
            assert.Equal(t, 1, metadatas, "metadata collections")

            // items in collections assertions
            for k, expect := range test.getter {
                coll := collections[k]

                if coll == nil {
                    continue
                }

                exColl, ok := coll.(*Collection)
                require.True(t, ok, "collection is an *exchange.Collection")

                ids := [][]string{
                    make([]string, 0, len(exColl.added)),
                    make([]string, 0, len(exColl.removed)),
                }

                for i, cIDs := range []map[string]struct{}{exColl.added, exColl.removed} {
                    for id := range cIDs {
                        ids[i] = append(ids[i], id)
                    }
                }

                assert.ElementsMatch(t, expect.added, ids[0], "added items")
                assert.ElementsMatch(t, expect.removed, ids[1], "removed items")
            }
        })
    }
}

func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repeatedItems() {
    newDelta := api.DeltaUpdate{URL: "delta_url"}
@@ -218,8 +218,7 @@ func RestoreMailMessage(
        return nil, err
    }

-   info := api.MailInfo(clone)
-   info.Size = int64(len(bits))
+   info := api.MailInfo(clone, int64(len(bits)))

    return info, nil
}
@@ -436,16 +435,13 @@ func restoreCollection(
        metrics.Bytes += int64(len(byteArray))
        metrics.Successes++

-       itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
+       itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
        if err != nil {
            errs.AddRecoverable(clues.Wrap(err, "building full path with item").WithClues(ctx))
            continue
        }

-       locationRef := &path.Builder{}
-
-       if category == path.ContactsCategory {
-           locationRef = locationRef.Append(itemPath.Folders()...)
-       }
+       locationRef := path.Builder{}.Append(itemPath.Folders()...)

        err = deets.Add(
            itemPath,
@@ -11,14 +11,19 @@ import (
    "github.com/alcionai/corso/src/pkg/path"
)

-var _ data.BackupCollection = emptyCollection{}
+var _ data.BackupCollection = prefixCollection{}

-type emptyCollection struct {
-   p  path.Path
-   su support.StatusUpdater
+// TODO: move this out of graph. /data would be a much better owner
+// for a generic struct like this. However, support.StatusUpdater makes
+// it difficult to extract from this package in a generic way.
+type prefixCollection struct {
+   full  path.Path
+   prev  path.Path
+   su    support.StatusUpdater
+   state data.CollectionState
}

-func (c emptyCollection) Items(ctx context.Context, _ *fault.Bus) <-chan data.Stream {
+func (c prefixCollection) Items(ctx context.Context, _ *fault.Bus) <-chan data.Stream {
    res := make(chan data.Stream)
    close(res)
@@ -28,21 +33,19 @@ func (c emptyCollection) Items(ctx context.Context, _ *fault.Bus) <-chan data.St
    return res
}

-func (c emptyCollection) FullPath() path.Path {
-   return c.p
+func (c prefixCollection) FullPath() path.Path {
+   return c.full
}

-func (c emptyCollection) PreviousPath() path.Path {
-   return c.p
+func (c prefixCollection) PreviousPath() path.Path {
+   return c.prev
}

-func (c emptyCollection) State() data.CollectionState {
-   // This assumes we won't change the prefix path. Could probably use MovedState
-   // as well if we do need to change things around.
-   return data.NotMovedState
+func (c prefixCollection) State() data.CollectionState {
+   return c.state
}

-func (c emptyCollection) DoNotMergeItems() bool {
+func (c prefixCollection) DoNotMergeItems() bool {
    return false
}
@@ -76,7 +79,7 @@ func BaseCollections(
    for cat := range categories {
        ictx := clues.Add(ctx, "base_service", service, "base_category", cat)

-       p, err := path.ServicePrefix(tenant, rOwner, service, cat)
+       full, err := path.ServicePrefix(tenant, rOwner, service, cat)
        if err != nil {
            // Shouldn't happen.
            err = clues.Wrap(err, "making path").WithClues(ictx)
@@ -87,8 +90,13 @@ func BaseCollections(
        }

        // only add this collection if it doesn't already exist in the set.
-       if _, ok := collKeys[p.String()]; !ok {
-           res = append(res, emptyCollection{p: p, su: su})
+       if _, ok := collKeys[full.String()]; !ok {
+           res = append(res, &prefixCollection{
+               prev:  full,
+               full:  full,
+               su:    su,
+               state: data.StateOf(full, full),
+           })
        }
    }

@@ -99,45 +107,11 @@ func BaseCollections(
// prefix migration
// ---------------------------------------------------------------------------

-var _ data.BackupCollection = prefixCollection{}
-
-// TODO: move this out of graph. /data would be a much better owner
-// for a generic struct like this. However, support.StatusUpdater makes
-// it difficult to extract from this package in a generic way.
-type prefixCollection struct {
-   full, prev path.Path
-   su         support.StatusUpdater
-   state      data.CollectionState
-}
-
-func (c prefixCollection) Items(ctx context.Context, _ *fault.Bus) <-chan data.Stream {
-   res := make(chan data.Stream)
-   close(res)
-
-   s := support.CreateStatus(ctx, support.Backup, 0, support.CollectionMetrics{}, "")
-   c.su(s)
-
-   return res
-}
-
-func (c prefixCollection) FullPath() path.Path {
-   return c.full
-}
-
-func (c prefixCollection) PreviousPath() path.Path {
-   return c.prev
-}
-
-func (c prefixCollection) State() data.CollectionState {
-   return c.state
-}
-
-func (c prefixCollection) DoNotMergeItems() bool {
-   return false
-}
-
// Creates a new collection that only handles prefix pathing.
-func NewPrefixCollection(prev, full path.Path, su support.StatusUpdater) (*prefixCollection, error) {
+func NewPrefixCollection(
+   prev, full path.Path,
+   su support.StatusUpdater,
+) (*prefixCollection, error) {
    if prev != nil {
        if len(prev.Item()) > 0 {
            return nil, clues.New("prefix collection previous path contains an item")
@@ -1,53 +0,0 @@
-package graph
-
-import (
-   "net/http"
-   "sync"
-
-   "github.com/alcionai/clues"
-   khttp "github.com/microsoft/kiota-http-go"
-)
-
-// concurrencyLimiter middleware limits the number of concurrent requests to graph API
-type concurrencyLimiter struct {
-   semaphore chan struct{}
-}
-
-var (
-   once                  sync.Once
-   concurrencyLim        *concurrencyLimiter
-   maxConcurrentRequests = 4
-)
-
-func generateConcurrencyLimiter(capacity int) *concurrencyLimiter {
-   if capacity < 1 || capacity > maxConcurrentRequests {
-       capacity = maxConcurrentRequests
-   }
-
-   return &concurrencyLimiter{
-       semaphore: make(chan struct{}, capacity),
-   }
-}
-
-func InitializeConcurrencyLimiter(capacity int) {
-   once.Do(func() {
-       concurrencyLim = generateConcurrencyLimiter(capacity)
-   })
-}
-
-func (cl *concurrencyLimiter) Intercept(
-   pipeline khttp.Pipeline,
-   middlewareIndex int,
-   req *http.Request,
-) (*http.Response, error) {
-   if cl == nil || cl.semaphore == nil {
-       return nil, clues.New("nil concurrency limiter")
-   }
-
-   cl.semaphore <- struct{}{}
-   defer func() {
-       <-cl.semaphore
-   }()
-
-   return pipeline.Next(req, middlewareIndex)
-}
202
src/internal/connector/graph/concurrency_middleware.go
Normal file
@@ -0,0 +1,202 @@
+package graph
+
+import (
+   "context"
+   "net/http"
+   "sync"
+
+   "github.com/alcionai/clues"
+   khttp "github.com/microsoft/kiota-http-go"
+   "golang.org/x/time/rate"
+
+   "github.com/alcionai/corso/src/pkg/logger"
+   "github.com/alcionai/corso/src/pkg/path"
+)
+
+// ---------------------------------------------------------------------------
+// Concurrency Limiter
+// "how many calls at one time"
+// ---------------------------------------------------------------------------
+
+// concurrencyLimiter middleware limits the number of concurrent requests to graph API
+type concurrencyLimiter struct {
+   semaphore chan struct{}
+}
+
+var (
+   once                  sync.Once
+   concurrencyLim        *concurrencyLimiter
+   maxConcurrentRequests = 4
+)
+
+func generateConcurrencyLimiter(capacity int) *concurrencyLimiter {
+   if capacity < 1 || capacity > maxConcurrentRequests {
+       capacity = maxConcurrentRequests
+   }
+
+   return &concurrencyLimiter{
+       semaphore: make(chan struct{}, capacity),
+   }
+}
+
+func InitializeConcurrencyLimiter(capacity int) {
+   once.Do(func() {
+       concurrencyLim = generateConcurrencyLimiter(capacity)
+   })
+}
+
+func (cl *concurrencyLimiter) Intercept(
+   pipeline khttp.Pipeline,
+   middlewareIndex int,
+   req *http.Request,
+) (*http.Response, error) {
+   if cl == nil || cl.semaphore == nil {
+       return nil, clues.New("nil concurrency limiter")
+   }
+
+   cl.semaphore <- struct{}{}
+   defer func() {
+       <-cl.semaphore
+   }()
+
+   return pipeline.Next(req, middlewareIndex)
+}
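The buffered channel above is Go's standard counting-semaphore idiom: a send acquires one of `capacity` slots and blocks while all of them are taken, and a receive releases a slot. A minimal standalone sketch of the same pattern (hypothetical names, not part of this commit):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// capacity mirrors maxConcurrentRequests above: at most 4 goroutines
	// may hold a slot at once.
	sem := make(chan struct{}, 4)

	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		wg.Add(1)

		go func(n int) {
			defer wg.Done()

			sem <- struct{}{}        // acquire: blocks while 4 slots are taken
			defer func() { <-sem }() // release on exit

			fmt.Println("request", n, "in flight")
		}(i)
	}

	wg.Wait()
}
```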
+//nolint:lll
+// ---------------------------------------------------------------------------
+// Rate Limiter
+// "how many calls in a minute"
+// https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
+// ---------------------------------------------------------------------------
+
+const (
+   // Default goal is to keep calls below the 10k-per-10-minute threshold.
+   // 14 tokens every second nets 840 per minute. That's 8400 every 10 minutes,
+   // which is a bit below the mark.
+   // But suppose we have a minute-long dry spell followed by a 10 minute tsunami.
+   // We'll have built up 750 tokens in reserve, so the first 750 calls go through
+   // immediately. Over the next 10 minutes, we'll partition out the other calls
+   // at a rate of 840-per-minute, ending at a total of 9150. Theoretically, if
+   // the volume keeps up after that, we'll always stay between 8400 and 9150 out
+   // of 10k. Worst case scenario, we have an extra minute of padding to allow
+   // up to 9990.
+   defaultPerSecond = 14  // 14 * 60 = 840
+   defaultMaxCap    = 750 // real cap is 10k-per-10-minutes
+   // since drive runs on a per-minute, rather than per-10-minute bucket, we have
+   // to keep the max cap equal to the per-second cap. A large maxCap pool (say,
+   // 1200, similar to the per-minute cap) would allow us to make a flood of 2400
+   // calls in the first minute, putting us over the per-minute limit. Keeping
+   // the cap at the per-second burst means we only dole out a max of 1240 in one
+   // minute (20 cap + 1200 per minute + one burst of padding).
+   drivePerSecond = 20 // 20 * 60 = 1200
+   driveMaxCap    = 20 // real cap is 1250-per-minute
+)
+
+var (
+   driveLimiter = rate.NewLimiter(drivePerSecond, driveMaxCap)
+   // also used as the exchange service limiter
+   defaultLimiter = rate.NewLimiter(defaultPerSecond, defaultMaxCap)
+)
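The arithmetic in those comments maps directly onto golang.org/x/time/rate semantics: the second argument to rate.NewLimiter is the burst (bucket) size, and tokens refill at the per-second rate up to that cap. A small sketch checking the default limiter's numbers (illustrative only; the limiter instance is hypothetical, the constants mirror the ones above):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 14 tokens/sec with a 750-token bucket, as in defaultLimiter above.
	lim := rate.NewLimiter(14, 750)

	now := time.Now()

	// A full bucket permits a 750-call burst...
	fmt.Println(lim.AllowN(now, 750)) // true
	// ...but the 751st call must wait for a refill.
	fmt.Println(lim.AllowN(now, 1)) // false

	// One second later, 14 more tokens have accrued.
	fmt.Println(lim.AllowN(now.Add(time.Second), 14)) // true

	// Over a 10 minute window that's 14 * 600 = 8400 refilled tokens, so a
	// drained-then-flooded worst case tops out at 750 + 8400 = 9150 calls.
}
```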
+type LimiterCfg struct {
+   Service path.ServiceType
+}
+
+type limiterCfgKey string
+
+const limiterCfgCtxKey limiterCfgKey = "corsoGraphRateLimiterCfg"
+
+func BindRateLimiterConfig(ctx context.Context, lc LimiterCfg) context.Context {
+   return context.WithValue(ctx, limiterCfgCtxKey, lc)
+}
+
+func ctxLimiter(ctx context.Context) *rate.Limiter {
+   lc, ok := extractRateLimiterConfig(ctx)
+   if !ok {
+       return defaultLimiter
+   }
+
+   switch lc.Service {
+   case path.OneDriveService, path.SharePointService:
+       return driveLimiter
+   default:
+       return defaultLimiter
+   }
+}
+
+func extractRateLimiterConfig(ctx context.Context) (LimiterCfg, bool) {
+   l := ctx.Value(limiterCfgCtxKey)
+   if l == nil {
+       return LimiterCfg{}, false
+   }
+
+   lc, ok := l.(LimiterCfg)
+
+   return lc, ok
+}
+
+type limiterConsumptionKey string
+
+const limiterConsumptionCtxKey limiterConsumptionKey = "corsoGraphRateLimiterConsumption"
+
+const (
+   defaultLC      = 1
+   driveDefaultLC = 2
+   // limit consumption rate for single-item GET requests,
+   // or delta-based multi-item GETs.
+   SingleGetOrDeltaLC = 1
+   // limit consumption rate for anything permissions related
+   PermissionsLC = 5
+)
+
+// ConsumeNTokens ensures any calls using this context will consume
+// n rate-limiter tokens. Default is 1, and this value does not need
+// to be established in the context to consume the default tokens.
+// This should only get used on a per-call basis, to avoid cross-pollination.
+func ConsumeNTokens(ctx context.Context, n int) context.Context {
+   return context.WithValue(ctx, limiterConsumptionCtxKey, n)
+}
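Both knobs travel on the context, so a caller configures them once and every Graph request issued beneath that context inherits them. A hypothetical call site (the helper name is invented; the graph functions and constants are the ones defined above):

```go
package example

import (
	"context"

	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/pkg/path"
)

// configureDriveCtx is a hypothetical helper showing how a OneDrive
// operation would opt in to the drive limiter and the higher
// permissions consumption rate defined above.
func configureDriveCtx(ctx context.Context) context.Context {
	// route requests under this ctx to driveLimiter
	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: path.OneDriveService})

	// each call under this ctx now drains PermissionsLC (5) tokens
	return graph.ConsumeNTokens(ctx, graph.PermissionsLC)
}
```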
+func ctxLimiterConsumption(ctx context.Context, defaultConsumption int) int {
+   l := ctx.Value(limiterConsumptionCtxKey)
+   if l == nil {
+       return defaultConsumption
+   }
+
+   lc, ok := l.(int)
+   if !ok || lc < 1 {
+       return defaultConsumption
+   }
+
+   return lc
+}
+
+// QueueRequest will allow the request to occur immediately if we're under the
+// calls-per-minute rate. Otherwise, the call will wait in a queue until
+// the next token set is available.
+func QueueRequest(ctx context.Context) {
+   limiter := ctxLimiter(ctx)
+   defaultConsumed := defaultLC
+
+   if limiter == driveLimiter {
+       defaultConsumed = driveDefaultLC
+   }
+
+   consume := ctxLimiterConsumption(ctx, defaultConsumed)
+
+   if err := limiter.WaitN(ctx, consume); err != nil {
+       logger.CtxErr(ctx, err).Error("graph middleware waiting on the limiter")
+   }
+}
+
+// RateLimiterMiddleware is used to ensure we don't overstep per-min request limits.
+type RateLimiterMiddleware struct{}
+
+func (mw *RateLimiterMiddleware) Intercept(
+   pipeline khttp.Pipeline,
+   middlewareIndex int,
+   req *http.Request,
+) (*http.Response, error) {
+   QueueRequest(req.Context())
+   return pipeline.Next(req, middlewareIndex)
+}
@@ -140,13 +140,20 @@ func defaultTransport() http.RoundTripper {
}

func internalMiddleware(cc *clientConfig) []khttp.Middleware {
-   return []khttp.Middleware{
+   mw := []khttp.Middleware{
        &RetryMiddleware{
            MaxRetries: cc.maxRetries,
            Delay:      cc.minDelay,
        },
+       khttp.NewRedirectHandler(),
        &LoggingMiddleware{},
-       &ThrottleControlMiddleware{},
+       &RateLimiterMiddleware{},
        &MetricsMiddleware{},
    }
+
+   if len(cc.appendMiddleware) > 0 {
+       mw = append(mw, cc.appendMiddleware...)
+   }
+
+   return mw
}
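Every middleware in this chain shares the same kiota contract: do its per-request work, then delegate with pipeline.Next. A minimal sketch of that contract, using only the interface surface visible in this diff (the struct and header name are hypothetical):

```go
package example

import (
	"net/http"

	khttp "github.com/microsoft/kiota-http-go"
)

// headerStamper sketches the khttp.Middleware shape used throughout
// this commit: mutate or inspect the request, then hand off.
type headerStamper struct{}

var _ khttp.Middleware = &headerStamper{}

func (mw *headerStamper) Intercept(
	pipeline khttp.Pipeline,
	middlewareIndex int,
	req *http.Request,
) (*http.Response, error) {
	// per-request work happens before delegation
	req.Header.Set("X-Example-Stamp", "1")

	// pass control to the next middleware in the pipeline
	return pipeline.Next(req, middlewareIndex)
}
```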
@@ -2,9 +2,11 @@ package graph

import (
    "net/http"
+   "strings"
    "testing"

    "github.com/alcionai/clues"
+   khttp "github.com/microsoft/kiota-http-go"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"

@@ -43,3 +45,72 @@ func (suite *HTTPWrapperIntgSuite) TestNewHTTPWrapper() {
    require.NotNil(t, resp)
    require.Equal(t, http.StatusOK, resp.StatusCode)
}
+
+type mwForceResp struct {
+   err       error
+   resp      *http.Response
+   alternate func(*http.Request) (bool, *http.Response, error)
+}
+
+func (mw *mwForceResp) Intercept(
+   pipeline khttp.Pipeline,
+   middlewareIndex int,
+   req *http.Request,
+) (*http.Response, error) {
+   ok, r, e := mw.alternate(req)
+   if ok {
+       return r, e
+   }
+
+   return mw.resp, mw.err
+}
+
+type HTTPWrapperUnitSuite struct {
+   tester.Suite
+}
+
+func TestHTTPWrapperUnitSuite(t *testing.T) {
+   suite.Run(t, &HTTPWrapperUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *HTTPWrapperUnitSuite) TestNewHTTPWrapper_redirectMiddleware() {
+   ctx, flush := tester.NewContext()
+   defer flush()
+
+   var (
+       t    = suite.T()
+       uri  = "https://graph.microsoft.com"
+       path = "/fnords/beaux/regard"
+       url  = uri + path
+   )
+
+   // can't use gock for this, or else it'll short-circuit the transport,
+   // and thus skip all the middleware
+   hdr := http.Header{}
+   hdr.Set("Location", "localhost:99999999/smarfs")
+
+   toResp := &http.Response{
+       StatusCode: 302,
+       Header:     hdr,
+   }
+
+   mwResp := mwForceResp{
+       resp: toResp,
+       alternate: func(req *http.Request) (bool, *http.Response, error) {
+           if strings.HasSuffix(req.URL.String(), "smarfs") {
+               return true, &http.Response{StatusCode: http.StatusOK}, nil
+           }
+
+           return false, nil, nil
+       },
+   }
+
+   hw := NewHTTPWrapper(appendMiddleware(&mwResp))
+
+   resp, err := hw.Request(ctx, http.MethodGet, url, nil, nil)
+
+   require.NoError(t, err, clues.ToCore(err))
+   require.NotNil(t, resp)
+   // require.Equal(t, 1, calledCorrectly, "test server was called with expected path")
+   require.Equal(t, http.StatusOK, resp.StatusCode)
+}
@@ -2,7 +2,6 @@ package graph

import (
    "context"
-   "fmt"
    "io"
    "net/http"
    "net/http/httputil"
@@ -15,7 +14,6 @@ import (
    backoff "github.com/cenkalti/backoff/v4"
    khttp "github.com/microsoft/kiota-http-go"
    "golang.org/x/exp/slices"
-   "golang.org/x/time/rate"

    "github.com/alcionai/corso/src/internal/common/pii"
    "github.com/alcionai/corso/src/internal/events"
@@ -100,6 +98,9 @@ func LoggableURL(url string) pii.SafeURL {
    }
}

+// 1 MB
+const logMBLimit = 1 * 1048576
+
func (mw *LoggingMiddleware) Intercept(
    pipeline khttp.Pipeline,
    middlewareIndex int,
@@ -122,42 +123,61 @@ func (mw *LoggingMiddleware) Intercept(
        return resp, err
    }

-   ctx = clues.Add(ctx, "status", resp.Status, "statusCode", resp.StatusCode)
-   log := logger.Ctx(ctx)
+   ctx = clues.Add(
+       ctx,
+       "status", resp.Status,
+       "statusCode", resp.StatusCode,
+       "content_len", resp.ContentLength)

-   // Return immediately if the response is good (2xx).
-   // If api logging is toggled, log a body-less dump of the request/resp.
-   if (resp.StatusCode / 100) == 2 {
-       if logger.DebugAPIFV || os.Getenv(log2xxGraphRequestsEnvKey) != "" {
-           log.Debugw("2xx graph api resp", "response", getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != ""))
-       }
-
-       return resp, err
-   }
-
-   // Log errors according to api debugging configurations.
-   // When debugging is toggled, every non-2xx is recorded with a response dump.
-   // Otherwise, throttling cases and other non-2xx responses are logged
-   // with a slimmer reference for telemetry/supportability purposes.
-   if logger.DebugAPIFV || os.Getenv(logGraphRequestsEnvKey) != "" {
-       log.Errorw("non-2xx graph api response", "response", getRespDump(ctx, resp, true))
-       return resp, err
-   }
-
-   msg := fmt.Sprintf("graph api error: %s", resp.Status)
-
-   // special case for supportability: log all throttling cases.
+   var (
+       log       = logger.Ctx(ctx)
+       respClass = resp.StatusCode / 100
+       logExtra  = logger.DebugAPIFV || os.Getenv(logGraphRequestsEnvKey) != ""
+   )
+
+   // special case: always info log 429 responses
    if resp.StatusCode == http.StatusTooManyRequests {
-       log = log.With(
+       log.Infow(
+           "graph api throttling",
            "limit", resp.Header.Get(rateLimitHeader),
            "remaining", resp.Header.Get(rateRemainingHeader),
            "reset", resp.Header.Get(rateResetHeader),
            "retry-after", resp.Header.Get(retryAfterHeader))
-   } else if resp.StatusCode/100 == 4 || resp.StatusCode == http.StatusServiceUnavailable {
-       log = log.With("response", getRespDump(ctx, resp, true))
+
+       return resp, err
    }

-   log.Info(msg)
+   // special case: always dump status-400-bad-request
+   if resp.StatusCode == http.StatusBadRequest {
+       log.With("response", getRespDump(ctx, resp, true)).
+           Error("graph api error: " + resp.Status)
+
+       return resp, err
+   }
+
+   // Log api calls according to api debugging configurations.
+   switch respClass {
+   case 2:
+       if logExtra {
+           // only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log.
+           dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit)
+           log.Infow("2xx graph api resp", "response", dump)
+       }
+   case 3:
+       log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader)))
+
+       if logExtra {
+           log.With("response", getRespDump(ctx, resp, false))
+       }
+
+       log.Info("graph api redirect: " + resp.Status)
+   default:
+       if logExtra {
+           log.With("response", getRespDump(ctx, resp, true))
+       }
+
+       log.Error("graph api error: " + resp.Status)
+   }

    return resp, err
}
@@ -359,50 +379,6 @@ func (mw RetryMiddleware) getRetryDelay(
    return exponentialBackoff.NextBackOff()
}

-// We're trying to keep calls below the 10k-per-10-minute threshold.
-// 15 tokens every second nets 900 per minute. That's 9000 every 10 minutes,
-// which is a bit below the mark.
-// But suppose we have a minute-long dry spell followed by a 10 minute tsunami.
-// We'll have built up 900 tokens in reserve, so the first 900 calls go through
-// immediately. Over the next 10 minutes, we'll partition out the other calls
-// at a rate of 900-per-minute, ending at a total of 9900. Theoretically, if
-// the volume keeps up after that, we'll always stay between 9000 and 9900 out
-// of 10k.
-const (
-   perSecond = 15
-   maxCap    = 900
-)
-
-// Single, global rate limiter at this time. Refinements for method (creates,
-// versus reads) or service can come later.
-var limiter = rate.NewLimiter(perSecond, maxCap)
-
-// QueueRequest will allow the request to occur immediately if we're under the
-// 1k-calls-per-minute rate. Otherwise, the call will wait in a queue until
-// the next token set is available.
-func QueueRequest(ctx context.Context) {
-   if err := limiter.Wait(ctx); err != nil {
-       logger.CtxErr(ctx, err).Error("graph middleware waiting on the limiter")
-   }
-}
-
-// ---------------------------------------------------------------------------
-// Rate Limiting
-// ---------------------------------------------------------------------------
-
-// ThrottleControlMiddleware is used to ensure we don't overstep 10k-per-10-min
-// request limits.
-type ThrottleControlMiddleware struct{}
-
-func (mw *ThrottleControlMiddleware) Intercept(
-   pipeline khttp.Pipeline,
-   middlewareIndex int,
-   req *http.Request,
-) (*http.Response, error) {
-   QueueRequest(req.Context())
-   return pipeline.Next(req, middlewareIndex)
-}
-
// ---------------------------------------------------------------------------
// Metrics
// ---------------------------------------------------------------------------
@@ -410,6 +386,8 @@ func (mw *ThrottleControlMiddleware) Intercept(
// MetricsMiddleware aggregates per-request metrics on the events bus
type MetricsMiddleware struct{}

+const xmruHeader = "x-ms-resource-unit"
+
func (mw *MetricsMiddleware) Intercept(
    pipeline khttp.Pipeline,
    middlewareIndex int,
@@ -430,5 +408,23 @@ func (mw *MetricsMiddleware) Intercept(
    events.Since(start, events.APICall)
    events.Since(start, events.APICall, status)

+   // track the graph "resource cost" for each call (if not provided, assume 1)
+
+   // nil-pointer guard
+   if len(resp.Header) == 0 {
+       resp.Header = http.Header{}
+   }
+
+   // from msoft throttling documentation:
+   // x-ms-resource-unit - Indicates the resource unit used for this request. Values are positive integer
+   xmru := resp.Header.Get(xmruHeader)
+   xmrui, e := strconv.Atoi(xmru)
+
+   if len(xmru) == 0 || e != nil {
+       xmrui = 1
+   }
+
+   events.IncN(xmrui, events.APICall, xmruHeader)
+
    return resp, err
}
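strconv.Atoi returns an error for an empty string, so the single fallback branch above covers both a missing header and a malformed value. A compact standalone check of that behavior (helper name hypothetical):

```go
package main

import (
	"fmt"
	"strconv"
)

// costOf mirrors the fallback above: a missing or malformed
// x-ms-resource-unit value counts as a cost of 1.
func costOf(headerVal string) int {
	n, err := strconv.Atoi(headerVal)
	if headerVal == "" || err != nil {
		return 1
	}

	return n
}

func main() {
	fmt.Println(costOf(""))     // 1 (header absent)
	fmt.Println(costOf("oops")) // 1 (malformed)
	fmt.Println(costOf("5"))    // 5
}
```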
@@ -17,10 +17,12 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
+   "golang.org/x/time/rate"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/account"
+   "github.com/alcionai/corso/src/pkg/path"
)

type mwReturns struct {
@@ -132,9 +134,9 @@ func (suite *RetryMWIntgSuite) SetupSuite() {

func (suite *RetryMWIntgSuite) TestRetryMiddleware_Intercept_byStatusCode() {
    var (
        uri = "https://graph.microsoft.com"
-       path = "/v1.0/users/user/messages/foo"
-       url  = uri + path
+       urlPath = "/v1.0/users/user/messages/foo"
+       url     = uri + urlPath
    )

    tests := []struct {
@@ -230,3 +232,107 @@ func (suite *RetryMWIntgSuite) TestRetryMiddleware_RetryRequest_resetBodyAfter50
        Post(ctx, body, nil)
    require.NoError(t, err, clues.ToCore(err))
}
+
+type MiddlewareUnitSuite struct {
+   tester.Suite
+}
+
+func TestMiddlewareUnitSuite(t *testing.T) {
+   suite.Run(t, &MiddlewareUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *MiddlewareUnitSuite) TestBindExtractLimiterConfig() {
+   ctx, flush := tester.NewContext()
+   defer flush()
+
+   // an unpopulated ctx should produce the default limiter
+   assert.Equal(suite.T(), defaultLimiter, ctxLimiter(ctx))
+
+   table := []struct {
+       name          string
+       service       path.ServiceType
+       expectOK      require.BoolAssertionFunc
+       expectLimiter *rate.Limiter
+   }{
+       {
+           name:          "exchange",
+           service:       path.ExchangeService,
+           expectLimiter: defaultLimiter,
+       },
+       {
+           name:          "oneDrive",
+           service:       path.OneDriveService,
+           expectLimiter: driveLimiter,
+       },
+       {
+           name:          "sharePoint",
+           service:       path.SharePointService,
+           expectLimiter: driveLimiter,
+       },
+       {
+           name:          "unknownService",
+           service:       path.UnknownService,
+           expectLimiter: defaultLimiter,
+       },
+       {
+           name:          "badService",
+           service:       path.ServiceType(-1),
+           expectLimiter: defaultLimiter,
+       },
+   }
+   for _, test := range table {
+       suite.Run(test.name, func() {
+           t := suite.T()
+
+           tctx := BindRateLimiterConfig(ctx, LimiterCfg{Service: test.service})
+           lc, ok := extractRateLimiterConfig(tctx)
+           require.True(t, ok, "found rate limiter in ctx")
+           assert.Equal(t, test.service, lc.Service)
+           assert.Equal(t, test.expectLimiter, ctxLimiter(tctx))
+       })
+   }
+}
+
+func (suite *MiddlewareUnitSuite) TestLimiterConsumption() {
+   ctx, flush := tester.NewContext()
+   defer flush()
+
+   // an unpopulated ctx should produce the default consumption
+   assert.Equal(suite.T(), defaultLC, ctxLimiterConsumption(ctx, defaultLC))
+
+   table := []struct {
+       name   string
+       n      int
+       expect int
+   }{
+       {
+           name:   "matches default",
+           n:      defaultLC,
+           expect: defaultLC,
+       },
+       {
+           name:   "default+1",
+           n:      defaultLC + 1,
+           expect: defaultLC + 1,
+       },
+       {
+           name:   "zero",
+           n:      0,
+           expect: defaultLC,
+       },
+       {
+           name:   "negative",
+           n:      -1,
+           expect: defaultLC,
+       },
+   }
+   for _, test := range table {
+       suite.Run(test.name, func() {
+           t := suite.T()
+
+           tctx := ConsumeNTokens(ctx, test.n)
+           lc := ctxLimiterConsumption(tctx, defaultLC)
+           assert.Equal(t, test.expect, lc)
+       })
+   }
+}
@@ -21,13 +21,14 @@ const (
    logGraphRequestsEnvKey    = "LOG_GRAPH_REQUESTS"
    log2xxGraphRequestsEnvKey = "LOG_2XX_GRAPH_REQUESTS"
    log2xxGraphResponseEnvKey = "LOG_2XX_GRAPH_RESPONSES"
-   retryAttemptHeader        = "Retry-Attempt"
-   retryAfterHeader          = "Retry-After"
    defaultMaxRetries         = 3
    defaultDelay              = 3 * time.Second
+   locationHeader            = "Location"
    rateLimitHeader           = "RateLimit-Limit"
    rateRemainingHeader       = "RateLimit-Remaining"
    rateResetHeader           = "RateLimit-Reset"
+   retryAfterHeader          = "Retry-After"
+   retryAttemptHeader        = "Retry-Attempt"
    defaultHTTPClientTimeout  = 1 * time.Hour
)

@@ -173,6 +174,8 @@ type clientConfig struct {
    // The minimum delay in seconds between retries
    minDelay time.Duration
    overrideRetryCount bool
+
+   appendMiddleware []khttp.Middleware
}

type Option func(*clientConfig)
@@ -225,6 +228,14 @@ func MinimumBackoff(dur time.Duration) Option {
    }
}

+func appendMiddleware(mw ...khttp.Middleware) Option {
+   return func(c *clientConfig) {
+       if len(mw) > 0 {
+           c.appendMiddleware = mw
+       }
+   }
+}
+
// ---------------------------------------------------------------------------
// Middleware Control
// ---------------------------------------------------------------------------
@@ -253,9 +264,13 @@ func kiotaMiddlewares(
        khttp.NewParametersNameDecodingHandler(),
        khttp.NewUserAgentHandler(),
        &LoggingMiddleware{},
-       &ThrottleControlMiddleware{},
+       &RateLimiterMiddleware{},
        &MetricsMiddleware{},
    }...)

+   if len(cc.appendMiddleware) > 0 {
+       mw = append(mw, cc.appendMiddleware...)
+   }
+
    return mw
}
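appendMiddleware follows the functional-options pattern this package already uses for MinimumBackoff and friends: an Option is just a closure over the private clientConfig, so tests can splice in extras (as the redirect test above does) without widening the constructor. A generic sketch of the pattern with hypothetical names:

```go
package example

// config and Option sketch the shape of clientConfig and its options.
type config struct {
	maxRetries int
	tags       []string
}

type Option func(*config)

// WithMaxRetries overrides the retry default.
func WithMaxRetries(n int) Option {
	return func(c *config) { c.maxRetries = n }
}

// WithTags mirrors appendMiddleware: variadic input, ignored when empty.
func WithTags(tags ...string) Option {
	return func(c *config) {
		if len(tags) > 0 {
			c.tags = tags
		}
	}
}

func newConfig(opts ...Option) *config {
	c := &config{maxRetries: 3} // defaults first

	for _, opt := range opts {
		opt(c)
	}

	return c
}
```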
@@ -11,6 +11,7 @@ import (
    "github.com/alcionai/corso/src/internal/connector/support"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/selectors"
+   selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
)

// ---------------------------------------------------------------
@@ -82,19 +83,19 @@ func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs_allServices
            checkError: assert.NoError,
            excludes: func(t *testing.T) selectors.Selector {
                sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"})
-               sel.Exclude(sel.Folders(selectors.Any()))
+               sel.Exclude(selTD.OneDriveBackupFolderScope(sel))
                sel.DiscreteOwner = "elliotReid@someHospital.org"
                return sel.Selector
            },
            filters: func(t *testing.T) selectors.Selector {
                sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"})
-               sel.Filter(sel.Folders(selectors.Any()))
+               sel.Filter(selTD.OneDriveBackupFolderScope(sel))
                sel.DiscreteOwner = "elliotReid@someHospital.org"
                return sel.Selector
            },
            includes: func(t *testing.T) selectors.Selector {
                sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"})
-               sel.Include(sel.Folders(selectors.Any()))
+               sel.Include(selTD.OneDriveBackupFolderScope(sel))
                sel.DiscreteOwner = "elliotReid@someHospital.org"
                return sel.Selector
            },
@@ -104,17 +105,17 @@ func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs_allServices
            checkError: assert.NoError,
            excludes: func(t *testing.T) selectors.Selector {
                sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"})
-               sel.Exclude(sel.Folders(selectors.Any()))
+               sel.Exclude(selTD.OneDriveBackupFolderScope(sel))
                return sel.Selector
            },
            filters: func(t *testing.T) selectors.Selector {
                sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"})
-               sel.Filter(sel.Folders(selectors.Any()))
+               sel.Filter(selTD.OneDriveBackupFolderScope(sel))
                return sel.Selector
            },
            includes: func(t *testing.T) selectors.Selector {
                sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"})
-               sel.Include(sel.Folders(selectors.Any()))
+               sel.Include(selTD.OneDriveBackupFolderScope(sel))
                return sel.Selector
            },
        },
@@ -866,7 +866,36 @@ func checkHasCollections(
    }

    for _, g := range got {
-       gotNames = append(gotNames, g.FullPath().String())
+       // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
+       // interface.
+       if !assert.Implements(t, (*data.LocationPather)(nil), g) {
+           continue
+       }
+
+       fp := g.FullPath()
+       loc := g.(data.LocationPather).LocationPath()
+
+       if fp.Service() == path.OneDriveService ||
+           (fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) {
+           dp, err := path.ToDrivePath(fp)
+           if !assert.NoError(t, err, clues.ToCore(err)) {
+               continue
+           }
+
+           loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...)
+       }
+
+       p, err := loc.ToDataLayerPath(
+           fp.Tenant(),
+           fp.ResourceOwner(),
+           fp.Service(),
+           fp.Category(),
+           false)
+       if !assert.NoError(t, err, clues.ToCore(err)) {
+           continue
+       }
+
+       gotNames = append(gotNames, p.String())
    }

    assert.ElementsMatch(t, expectedNames, gotNames, "returned collections")
@@ -1047,7 +1076,7 @@ func makeSharePointBackupSel(
}

// backupSelectorForExpected creates a selector that can be used to backup the
-// given Items in expected based on the item paths. Fails the test if items from
+// given dests based on the item paths. Fails the test if items from
// multiple services are in expected.
func backupSelectorForExpected(
    t *testing.T,
@@ -14,6 +14,7 @@ import (

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/graph"
+   odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
    "github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/internal/version"
@@ -351,35 +352,35 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(
        suite.BackupResourceOwner())

    rootPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
    }
    folderAPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
    }
    subfolderBPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
        folderBName,
    }
    subfolderAPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
        folderBName,
        folderAName,
    }
    folderBPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderBName,
    }

@@ -496,34 +497,34 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
    folderCName := "folder-c"

    rootPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
    }
    folderAPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
    }
    folderBPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderBName,
    }
    // For skipped test
    // subfolderAPath := []string{
-   //     "drives",
+   //     odConsts.DrivesPathDir,
    //     driveID,
-   //     rootFolder,
+   //     odConsts.RootPathDir,
    //     folderBName,
    //     folderAName,
    // }
    folderCPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderCName,
    }

@@ -707,9 +708,9 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
    inputCols := []OnedriveColInfo{
        {
            PathElements: []string{
-               "drives",
+               odConsts.DrivesPathDir,
                driveID,
-               rootFolder,
+               odConsts.RootPathDir,
            },
            Files: []ItemData{
                {
@@ -728,9 +729,9 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
    expectedCols := []OnedriveColInfo{
        {
            PathElements: []string{
-               "drives",
+               odConsts.DrivesPathDir,
                driveID,
-               rootFolder,
+               odConsts.RootPathDir,
            },
            Files: []ItemData{
                {
@@ -793,34 +794,34 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio
    folderCName := "empty"

    rootPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
    }
    folderAPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
    }
    subfolderAAPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
        folderAName,
    }
    subfolderABPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
        folderBName,
    }
    subfolderACPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderAName,
        folderCName,
    }
@@ -966,20 +967,20 @@ func testRestoreFolderNamedFolderRegression(
        suite.BackupResourceOwner())

    rootPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
    }
    folderFolderPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderNamedFolder,
    }
    subfolderPath := []string{
-       "drives",
+       odConsts.DrivesPathDir,
        driveID,
-       rootFolder,
+       odConsts.RootPathDir,
        folderNamedFolder,
        folderBName,
    }
@@ -13,6 +13,8 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/exp/maps"
+
+   odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
)

// For any version post this(inclusive), we expect to be using IDs for
@@ -230,7 +232,7 @@ func (c *onedriveCollection) withPermissions(perm PermData) *onedriveCollection
        metaName = ""
    }

-   if name == rootFolder {
+   if name == odConsts.RootPathDir {
        return c
    }

@@ -294,7 +294,7 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() {
    var (
        t    = suite.T()
        acct = tester.NewM365Account(t)
-       dest = tester.DefaultTestRestoreDestination()
+       dest = tester.DefaultTestRestoreDestination("")
        sel  = selectors.Selector{
            Service: selectors.ServiceUnknown,
        }
@@ -322,7 +322,7 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() {
}

func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
-   dest := tester.DefaultTestRestoreDestination()
+   dest := tester.DefaultTestRestoreDestination("")
    table := []struct {
        name string
        col  []data.RestoreCollection
@@ -542,7 +542,7 @@ func runRestoreBackupTest(
        Service:        test.service,
        Tenant:         tenant,
        ResourceOwners: resourceOwners,
-       Dest:           tester.DefaultTestRestoreDestination(),
+       Dest:           tester.DefaultTestRestoreDestination(""),
    }

    totalItems, totalKopiaItems, collections, expectedData := GetCollectionsAndExpected(
@@ -588,7 +588,7 @@ func runRestoreTestWithVerion(
        Service:        test.service,
        Tenant:         tenant,
        ResourceOwners: resourceOwners,
-       Dest:           tester.DefaultTestRestoreDestination(),
+       Dest:           tester.DefaultTestRestoreDestination(""),
    }

    totalItems, _, collections, _ := GetCollectionsAndExpected(
@@ -627,7 +627,7 @@ func runRestoreBackupTestVersions(
        Service:        test.service,
        Tenant:         tenant,
        ResourceOwners: resourceOwners,
-       Dest:           tester.DefaultTestRestoreDestination(),
+       Dest:           tester.DefaultTestRestoreDestination(""),
    }

    totalItems, _, collections, _ := GetCollectionsAndExpected(
@@ -1005,7 +1005,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
    for i, collection := range test.collections {
        // Get a dest per collection so they're independent.
-       dest := tester.DefaultTestRestoreDestination()
+       dest := tester.DefaultTestRestoreDestination("")
        expectedDests = append(expectedDests, destAndCats{
            resourceOwner: suite.user,
            dest:          dest.ContainerName,
@@ -1177,9 +1177,7 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections
            resource: Users,
            selectorFunc: func(t *testing.T) selectors.Selector {
                sel := selectors.NewOneDriveBackup([]string{suite.user})
-               sel.Include(
-                   sel.Folders([]string{selectors.NoneTgt}),
-               )
+               sel.Include(sel.Folders([]string{selectors.NoneTgt}))

                return sel.Selector
            },
@@ -336,18 +336,33 @@ func GetItemPermission(
    return perm, nil
}

-func GetDriveByID(
+func GetUsersDrive(
    ctx context.Context,
    srv graph.Servicer,
-   userID string,
+   user string,
) (models.Driveable, error) {
-   //revive:enable:context-as-argument
    d, err := srv.Client().
-       UsersById(userID).
+       UsersById(user).
        Drive().
        Get(ctx, nil)
    if err != nil {
-       return nil, graph.Wrap(ctx, err, "getting drive")
+       return nil, graph.Wrap(ctx, err, "getting user's drive")
+   }
+
+   return d, nil
+}
+
+func GetSitesDefaultDrive(
+   ctx context.Context,
+   srv graph.Servicer,
+   site string,
+) (models.Driveable, error) {
+   d, err := srv.Client().
+       SitesById(site).
+       Drive().
+       Get(ctx, nil)
+   if err != nil {
+       return nil, graph.Wrap(ctx, err, "getting site's drive")
    }

    return d, nil
@@ -358,7 +373,10 @@ func GetDriveRoot(
    srv graph.Servicer,
    driveID string,
) (models.DriveItemable, error) {
-   root, err := srv.Client().DrivesById(driveID).Root().Get(ctx, nil)
+   root, err := srv.Client().
+       DrivesById(driveID).
+       Root().
+       Get(ctx, nil)
    if err != nil {
        return nil, graph.Wrap(ctx, err, "getting drive root")
    }
|||||||
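Splitting GetDriveByID into GetUsersDrive and GetSitesDefaultDrive gives callers one helper per resource type. The sketch below is an illustrative call site, assuming the helpers live in the onedrive api package as the surrounding hunks suggest; the msgraph models and ptr import paths are inferred from imports visible elsewhere in this commit.

package example

import (
    "context"

    "github.com/microsoftgraph/msgraph-sdk-go/models"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/connector/onedrive/api"
)

// defaultDriveID is an illustrative call site, not repo code: it resolves
// the default drive ID for either a user or a site via the split helpers.
func defaultDriveID(
    ctx context.Context,
    srv graph.Servicer,
    owner string,
    isSite bool,
) (string, error) {
    var (
        d   models.Driveable
        err error
    )

    if isSite {
        d, err = api.GetSitesDefaultDrive(ctx, srv, owner)
    } else {
        d, err = api.GetUsersDrive(ctx, srv, owner)
    }

    if err != nil {
        return "", err
    }

    // Driveable exposes its ID through a pointer getter.
    return ptr.Val(d.GetId()), nil
}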
@@ -131,7 +131,7 @@ func pathToLocation(p path.Path) (*path.Builder, error) {
 // NewCollection creates a Collection
 func NewCollection(
     itemClient graph.Requester,
-    folderPath path.Path,
+    currPath path.Path,
     prevPath path.Path,
     driveID string,
     service graph.Servicer,
@@ -145,9 +145,9 @@ func NewCollection(
     // to be changed as we won't be able to extract path information from the
     // storage path. In that case, we'll need to start storing the location paths
     // like we do the previous path.
-    locPath, err := pathToLocation(folderPath)
+    locPath, err := pathToLocation(currPath)
     if err != nil {
-        return nil, clues.Wrap(err, "getting location").With("folder_path", folderPath.String())
+        return nil, clues.Wrap(err, "getting location").With("curr_path", currPath.String())
     }
 
     prevLocPath, err := pathToLocation(prevPath)
@@ -157,7 +157,7 @@ func NewCollection(
 
     c := newColl(
         itemClient,
-        folderPath,
+        currPath,
         prevPath,
         driveID,
         service,
@@ -175,7 +175,7 @@ func NewCollection(
 
 func newColl(
     gr graph.Requester,
-    folderPath path.Path,
+    currPath path.Path,
     prevPath path.Path,
     driveID string,
     service graph.Servicer,
@@ -188,7 +188,7 @@ func newColl(
     c := &Collection{
         itemClient: gr,
         itemGetter: api.GetDriveItem,
-        folderPath: folderPath,
+        folderPath: currPath,
         prevPath:   prevPath,
         driveItems: map[string]models.DriveItemable{},
         driveID:    driveID,
@@ -197,7 +197,7 @@ func newColl(
         data:          make(chan data.Stream, graph.Parallelism(path.OneDriveMetadataService).CollectionBufferSize()),
         statusUpdater: statusUpdater,
         ctrl:          ctrlOpts,
-        state:         data.StateOf(prevPath, folderPath),
+        state:         data.StateOf(prevPath, currPath),
         scope:           colScope,
         doNotMergeItems: doNotMergeItems,
     }
@@ -101,6 +101,7 @@ type Collections struct {
         servicer graph.Servicer,
         driveID, link string,
     ) itemPager
+    servicePathPfxFunc pathPrefixerFunc
 
     // Track stats from drive enumeration. Represents the items backed up.
     NumItems int
@@ -119,17 +120,18 @@ func NewCollections(
     ctrlOpts control.Options,
 ) *Collections {
     return &Collections{
         itemClient:     itemClient,
         tenant:         tenant,
         resourceOwner:  resourceOwner,
         source:         source,
         matcher:        matcher,
         CollectionMap:  map[string]map[string]*Collection{},
         drivePagerFunc: PagerForSource,
         itemPagerFunc:  defaultItemPager,
-        service:        service,
-        statusUpdater:  statusUpdater,
-        ctrl:           ctrlOpts,
+        servicePathPfxFunc: pathPrefixerForSource(tenant, resourceOwner, source),
+        service:            service,
+        statusUpdater:      statusUpdater,
+        ctrl:               ctrlOpts,
     }
 }
 
@@ -280,6 +282,12 @@ func (c *Collections) Get(
         return nil, err
     }
 
+    driveTombstones := map[string]struct{}{}
+
+    for driveID := range oldPathsByDriveID {
+        driveTombstones[driveID] = struct{}{}
+    }
+
     driveComplete, closer := observe.MessageWithCompletion(ctx, observe.Bulletf("files"))
     defer closer()
     defer close(driveComplete)
@@ -312,6 +320,8 @@ func (c *Collections) Get(
         ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
     )
 
+    delete(driveTombstones, driveID)
+
     if _, ok := c.CollectionMap[driveID]; !ok {
         c.CollectionMap[driveID] = map[string]*Collection{}
     }
@@ -408,7 +418,7 @@ func (c *Collections) Get(
 
     col, err := NewCollection(
         c.itemClient,
-        nil,
+        nil, // delete the folder
         prevPath,
         driveID,
         c.service,
@@ -427,15 +437,41 @@ func (c *Collections) Get(
 
     observe.Message(ctx, fmt.Sprintf("Discovered %d items to backup", c.NumItems))
 
-    // Add an extra for the metadata collection.
     collections := []data.BackupCollection{}
 
+    // add all the drives we found
     for _, driveColls := range c.CollectionMap {
         for _, coll := range driveColls {
             collections = append(collections, coll)
         }
     }
 
+    // generate tombstones for drives that were removed.
+    for driveID := range driveTombstones {
+        prevDrivePath, err := c.servicePathPfxFunc(driveID)
+        if err != nil {
+            return nil, clues.Wrap(err, "making drive tombstone previous path").WithClues(ctx)
+        }
+
+        coll, err := NewCollection(
+            c.itemClient,
+            nil, // delete the drive
+            prevDrivePath,
+            driveID,
+            c.service,
+            c.statusUpdater,
+            c.source,
+            c.ctrl,
+            CollectionScopeUnknown,
+            true)
+        if err != nil {
+            return nil, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
+        }
+
+        collections = append(collections, coll)
+    }
+
+    // add metadata collections
     service, category := c.source.toPathServiceCat()
     md, err := graph.MakeMetadataCollection(
         c.tenant,
@@ -457,7 +493,6 @@ func (c *Collections) Get(
         collections = append(collections, md)
     }
 
-    // TODO(ashmrtn): Track and return the set of items to exclude.
     return collections, nil
 }
 
@@ -642,7 +677,7 @@ func (c *Collections) getCollectionPath(
         return nil, clues.New("folder with empty name")
     }
 
-    collectionPath, err = collectionPath.Append(name, false)
+    collectionPath, err = collectionPath.Append(false, name)
     if err != nil {
         return nil, clues.Wrap(err, "making non-root folder path")
     }
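The driveTombstones bookkeeping in Get follows a mark-and-sweep shape: seed a set with every drive recorded in the previous backup, delete entries as the current enumeration rediscovers them, and whatever remains after the loop is a deleted drive that needs a tombstone collection. A self-contained sketch of the same pattern, with illustrative names rather than the repo's:

package main

import "fmt"

// tombstones returns the keys present in prev but absent from curr --
// the same mark-and-sweep shape used for drive tombstones above.
func tombstones(prev map[string]string, curr []string) []string {
    pending := map[string]struct{}{}
    for id := range prev {
        pending[id] = struct{}{}
    }

    // every ID seen in the current enumeration is still alive
    for _, id := range curr {
        delete(pending, id)
    }

    dead := make([]string, 0, len(pending))
    for id := range pending {
        dead = append(dead, id)
    }

    return dead
}

func main() {
    prev := map[string]string{"drive-1": "path1", "drive-2": "path2"}
    fmt.Println(tombstones(prev, []string{"drive-1"})) // [drive-2]
}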
@@ -1246,16 +1246,15 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
         user,
         path.OneDriveService,
         path.FilesCategory,
-        false,
-    )
+        false)
     require.NoError(suite.T(), err, "making metadata path", clues.ToCore(err))
 
-    driveID1 := uuid.NewString()
+    driveID1 := "drive-1-" + uuid.NewString()
     drive1 := models.NewDrive()
     drive1.SetId(&driveID1)
     drive1.SetName(&driveID1)
 
-    driveID2 := uuid.NewString()
+    driveID2 := "drive-2-" + uuid.NewString()
     drive2 := models.NewDrive()
     drive2.SetId(&driveID2)
     drive2.SetName(&driveID2)
@@ -1287,7 +1286,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
         expectedFolderPaths  map[string]map[string]string
         expectedDelList      *pmMock.PrefixMap
         expectedSkippedCount int
-        doNotMergeItems      bool
+        // map full or previous path (prefers full) -> bool
+        doNotMergeItems map[string]bool
     }{
         {
             name: "OneDrive_OneItemPage_DelFileOnly_NoFolders_NoErrors",
@@ -1321,7 +1321,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             }),
         },
         {
-            name:   "OneDrive_OneItemPage_NoFolders_NoErrors",
+            name:   "OneDrive_OneItemPage_NoFolderDeltas_NoErrors",
             drives: []models.Driveable{drive1},
             items: map[string][]deltaPagerResult{
                 driveID1: {
@@ -1699,7 +1699,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-            doNotMergeItems: true,
+            doNotMergeItems: map[string]bool{
+                rootFolderPath1: true,
+            },
         },
         {
             name: "OneDrive_TwoItemPage_DeltaError",
@@ -1741,7 +1743,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-            doNotMergeItems: true,
+            doNotMergeItems: map[string]bool{
+                rootFolderPath1: true,
+                folderPath1:     true,
+            },
         },
         {
             name: "OneDrive_TwoItemPage_NoDeltaError",
@@ -1785,7 +1790,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{
                 rootFolderPath1: getDelList("file", "file2"),
             }),
-            doNotMergeItems: false,
+            doNotMergeItems: map[string]bool{},
         },
         {
             name: "OneDrive_OneItemPage_InvalidPrevDelta_DeleteNonExistentFolder",
@@ -1827,7 +1832,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-            doNotMergeItems: true,
+            doNotMergeItems: map[string]bool{
+                rootFolderPath1:           true,
+                folderPath1:               true,
+                expectedPath1("/folder2"): true,
+            },
         },
         {
             name: "OneDrive_OneItemPage_InvalidPrevDelta_AnotherFolderAtDeletedLocation",
@@ -1873,7 +1882,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-            doNotMergeItems: true,
+            doNotMergeItems: map[string]bool{
+                rootFolderPath1: true,
+                folderPath1:     true,
+            },
         },
         {
             name: "OneDrive Two Item Pages with Malware",
@@ -1973,7 +1985,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-            doNotMergeItems: true,
+            doNotMergeItems: map[string]bool{
+                rootFolderPath1:           true,
+                folderPath1:               true,
+                expectedPath1("/folder2"): true,
+            },
         },
         {
             name: "One Drive Delta Error Random Folder Delete",
@@ -2012,7 +2028,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-            doNotMergeItems: true,
+            doNotMergeItems: map[string]bool{
+                rootFolderPath1: true,
+                folderPath1:     true,
+            },
         },
         {
             name: "One Drive Delta Error Random Item Delete",
@@ -2049,7 +2068,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
             expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-            doNotMergeItems: true,
+            doNotMergeItems: map[string]bool{
+                rootFolderPath1: true,
+            },
         },
         {
             name: "One Drive Folder Made And Deleted",
@@ -2200,6 +2221,37 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 rootFolderPath1: getDelList("file"),
             }),
         },
+        {
+            name:   "TwoPriorDrives_OneTombstoned",
+            drives: []models.Driveable{drive1},
+            items: map[string][]deltaPagerResult{
+                driveID1: {
+                    {
+                        items: []models.DriveItemable{
+                            driveRootItem("root"), // will be present
+                        },
+                        deltaLink: &delta,
+                    },
+                },
+            },
+            errCheck: assert.NoError,
+            prevFolderPaths: map[string]map[string]string{
+                driveID1: {"root": rootFolderPath1},
+                driveID2: {"root": rootFolderPath2},
+            },
+            expectedCollections: map[string]map[data.CollectionState][]string{
+                rootFolderPath1: {data.NotMovedState: {}},
+                rootFolderPath2: {data.DeletedState: {}},
+            },
+            expectedDeltaURLs: map[string]string{driveID1: delta},
+            expectedFolderPaths: map[string]map[string]string{
+                driveID1: {"root": rootFolderPath1},
+            },
+            expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
+            doNotMergeItems: map[string]bool{
+                rootFolderPath2: true,
+            },
+        },
     }
     for _, test := range table {
         suite.Run(test.name, func() {
@@ -2257,12 +2309,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                     map[string]string{
                         driveID1: prevDelta,
                         driveID2: prevDelta,
-                    },
-                ),
+                    }),
                 graph.NewMetadataEntry(
                     graph.PreviousPathFileName,
-                    test.prevFolderPaths,
-                ),
+                    test.prevFolderPaths),
             },
             func(*support.ConnectorOperationStatus) {},
         )
@@ -2329,18 +2379,24 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                     "state: %d, path: %s",
                     baseCol.State(),
                     folderPath)
-                assert.Equal(t, test.doNotMergeItems, baseCol.DoNotMergeItems(), "DoNotMergeItems")
+
+                p := baseCol.FullPath()
+                if p == nil {
+                    p = baseCol.PreviousPath()
+                }
+
+                assert.Equalf(
+                    t,
+                    test.doNotMergeItems[p.String()],
+                    baseCol.DoNotMergeItems(),
+                    "DoNotMergeItems in collection: %s", p)
             }
 
             expectedCollectionCount := 0
-            for c := range test.expectedCollections {
-                for range test.expectedCollections[c] {
-                    expectedCollectionCount++
-                }
+            for _, ec := range test.expectedCollections {
+                expectedCollectionCount += len(ec)
             }
 
-            // This check is necessary to make sure we are all the
-            // collections we expect it to
             assert.Equal(t, expectedCollectionCount, collectionCount, "number of collections")
 
             test.expectedDelList.AssertEqual(t, delList)
10
src/internal/connector/onedrive/consts/consts.go
Normal file
@@ -0,0 +1,10 @@
+package onedrive
+
+const (
+    // const used as the root dir for the drive portion of a path prefix.
+    // eg: tid/onedrive/ro/files/drives/driveid/...
+    DrivesPathDir = "drives"
+    // const used as the root-of-drive dir for the drive portion of a path prefix.
+    // eg: tid/onedrive/ro/files/drives/driveid/root:/...
+    RootPathDir = "root:"
+)
@@ -14,8 +14,10 @@ import (
     "github.com/alcionai/corso/src/internal/connector/graph"
     gapi "github.com/alcionai/corso/src/internal/connector/graph/api"
     "github.com/alcionai/corso/src/internal/connector/onedrive/api"
+    odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/logger"
+    "github.com/alcionai/corso/src/pkg/path"
 )
 
 const (
@@ -55,6 +57,25 @@ func PagerForSource(
     }
 }
 
+type pathPrefixerFunc func(driveID string) (path.Path, error)
+
+func pathPrefixerForSource(
+    tenantID, resourceOwner string,
+    source driveSource,
+) pathPrefixerFunc {
+    cat := path.FilesCategory
+    serv := path.OneDriveService
+
+    if source == SharePointSource {
+        cat = path.LibrariesCategory
+        serv = path.SharePointService
+    }
+
+    return func(driveID string) (path.Path, error) {
+        return path.Build(tenantID, resourceOwner, serv, cat, false, odConsts.DrivesPathDir, driveID, odConsts.RootPathDir)
+    }
+}
+
 // itemCollector functions collect the items found in a drive
 type itemCollector func(
     ctx context.Context,
@@ -137,7 +158,8 @@ func collectItems(
     }
 
     for {
-        page, err := pager.GetPage(ctx)
+        // assume delta urls here, which allows single-token consumption
+        page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC))
 
         if graph.IsErrInvalidDelta(err) {
             logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)
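pathPrefixerForSource curries the tenant, resource owner, and service so that Collections only needs a drive ID to rebuild a drive's path prefix; combined with the consts file above, the produced prefix reads tenant/onedrive/owner/files/drives/<driveID>/root:. A minimal sketch of a call site, assuming it sits in the same package; OneDriveSource is an assumed non-SharePoint variant of driveSource, since this commit only shows SharePointSource.

// Illustrative only; assumes the same package as pathPrefixerForSource.
func examplePrefix() {
    // OneDriveSource is assumed here; the diff only shows SharePointSource.
    pfx := pathPrefixerForSource("tid", "ro", OneDriveSource)

    p, err := pfx("driveid")
    if err != nil {
        return // a real caller would wrap and surface this
    }

    // p renders roughly as: tid/onedrive/ro/files/drives/driveid/root:
    _ = p.String()
}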
@@ -279,24 +279,24 @@ func (suite *OneDriveUnitSuite) TestDrives() {
 
 // Integration tests
 
-type OneDriveSuite struct {
+type OneDriveIntgSuite struct {
     tester.Suite
     userID string
 }
 
 func TestOneDriveSuite(t *testing.T) {
-    suite.Run(t, &OneDriveSuite{
+    suite.Run(t, &OneDriveIntgSuite{
         Suite: tester.NewIntegrationSuite(
             t,
             [][]string{tester.M365AcctCredEnvs}),
     })
 }
 
-func (suite *OneDriveSuite) SetupSuite() {
+func (suite *OneDriveIntgSuite) SetupSuite() {
     suite.userID = tester.SecondaryM365UserID(suite.T())
 }
 
-func (suite *OneDriveSuite) TestCreateGetDeleteFolder() {
+func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
     ctx, flush := tester.NewContext()
     defer flush()
 
@@ -401,7 +401,7 @@ func (fm testFolderMatcher) Matches(p string) bool {
     return fm.scope.Matches(selectors.OneDriveFolder, p)
 }
 
-func (suite *OneDriveSuite) TestOneDriveNewCollections() {
+func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() {
     creds, err := tester.NewM365Account(suite.T()).M365Config()
     require.NoError(suite.T(), err, clues.ToCore(err))
 
@@ -333,7 +333,11 @@ func driveItemWriter(
     session := drives.NewItemItemsItemCreateUploadSessionPostRequestBody()
     ctx = clues.Add(ctx, "upload_item_id", itemID)
 
-    r, err := service.Client().DrivesById(driveID).ItemsById(itemID).CreateUploadSession().Post(ctx, session, nil)
+    r, err := service.Client().
+        DrivesById(driveID).
+        ItemsById(itemID).
+        CreateUploadSession().
+        Post(ctx, session, nil)
     if err != nil {
         return nil, graph.Wrap(ctx, err, "creating item upload session")
     }
@@ -128,8 +128,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 }
 
 // TestItemWriter is an integration test for uploading data to OneDrive
-// It creates a new `testfolder_<timestamp` folder with a new
-// testitem_<timestamp> item and writes data to it
+// It creates a new folder with a new item and writes data to it
 func (suite *ItemIntegrationSuite) TestItemWriter() {
     table := []struct {
         name string
@@ -155,24 +154,20 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
             root, err := srv.Client().DrivesById(test.driveID).Root().Get(ctx, nil)
             require.NoError(t, err, clues.ToCore(err))
 
-            // Test Requirement 2: "Test Folder" should exist
-            folder, err := api.GetFolderByName(ctx, srv, test.driveID, ptr.Val(root.GetId()), "Test Folder")
-            require.NoError(t, err, clues.ToCore(err))
-
-            newFolderName := "testfolder_" + dttm.FormatNow(dttm.SafeForTesting)
-            t.Logf("Test will create folder %s", newFolderName)
+            newFolderName := tester.DefaultTestRestoreDestination("folder").ContainerName
+            t.Logf("creating folder %s", newFolderName)
 
             newFolder, err := CreateItem(
                 ctx,
                 srv,
                 test.driveID,
-                ptr.Val(folder.GetId()),
+                ptr.Val(root.GetId()),
                 newItem(newFolderName, true))
             require.NoError(t, err, clues.ToCore(err))
             require.NotNil(t, newFolder.GetId())
 
             newItemName := "testItem_" + dttm.FormatNow(dttm.SafeForTesting)
-            t.Logf("Test will create item %s", newItemName)
+            t.Logf("creating item %s", newItemName)
 
             newItem, err := CreateItem(
                 ctx,
@@ -161,7 +161,7 @@ func UpdatePermissions(
         DrivesById(driveID).
         ItemsById(itemID).
         PermissionsById(pid).
-        Delete(ctx, nil)
+        Delete(graph.ConsumeNTokens(ctx, graph.PermissionsLC), nil)
     if err != nil {
         return graph.Wrap(ctx, err, "removing permissions")
     }
@@ -207,7 +207,11 @@ func UpdatePermissions(
 
     pbody.SetRecipients([]models.DriveRecipientable{rec})
 
-    np, err := service.Client().DrivesById(driveID).ItemsById(itemID).Invite().Post(ctx, pbody, nil)
+    np, err := service.Client().
+        DrivesById(driveID).
+        ItemsById(itemID).
+        Invite().
+        Post(graph.ConsumeNTokens(ctx, graph.PermissionsLC), pbody, nil)
     if err != nil {
         return graph.Wrap(ctx, err, "setting permissions")
     }
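graph.ConsumeNTokens appears in this commit wherever a Graph call has a non-default rate-limit price: permissions calls consume graph.PermissionsLC tokens, delta pages graph.SingleGetOrDeltaLC. The repo's implementation is not shown here, but the general context-annotation pattern it suggests can be sketched in a self-contained way:

package main

import (
    "context"
    "fmt"
)

type tokenKey struct{}

// consumeNTokens mimics the context-annotation pattern: it records how many
// rate-limit tokens the wrapped call should cost.
func consumeNTokens(ctx context.Context, n int) context.Context {
    return context.WithValue(ctx, tokenKey{}, n)
}

// tokenCost reads the cost back out, defaulting to 1 when unset.
func tokenCost(ctx context.Context) int {
    if n, ok := ctx.Value(tokenKey{}).(int); ok {
        return n
    }

    return 1
}

func main() {
    ctx := consumeNTokens(context.Background(), 2) // eg: a permissions call
    fmt.Println(tokenCost(ctx))                    // 2
    fmt.Println(tokenCost(context.Background()))   // 1
}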
@@ -231,7 +231,7 @@ func RestoreCollection(
     return metrics, nil
 }
 
-    itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
+    itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
     if err != nil {
         el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
         continue
@@ -781,17 +781,29 @@ func getMetadata(metar io.ReadCloser) (metadata.Metadata, error) {
 
 // Augment restore path to add extra files(meta) needed for restore as
 // well as do any other ordering operations on the paths
-func AugmentRestorePaths(backupVersion int, paths []path.Path) ([]path.Path, error) {
-    colPaths := map[string]path.Path{}
+//
+// Only accepts StoragePath/RestorePath pairs where the RestorePath is
+// at least as long as the StoragePath. If the RestorePath is longer than the
+// StoragePath then the first few (closest to the root) directories will use
+// default permissions during restore.
+func AugmentRestorePaths(
+    backupVersion int,
+    paths []path.RestorePaths,
+) ([]path.RestorePaths, error) {
+    // Keyed by each value's StoragePath.String() which corresponds to the RepoRef
+    // of the directory.
+    colPaths := map[string]path.RestorePaths{}
 
     for _, p := range paths {
+        first := true
+
         for {
-            np, err := p.Dir()
+            sp, err := p.StoragePath.Dir()
             if err != nil {
                 return nil, err
             }
 
-            drivePath, err := path.ToDrivePath(np)
+            drivePath, err := path.ToDrivePath(sp)
             if err != nil {
                 return nil, err
             }
@@ -800,8 +812,31 @@ func AugmentRestorePaths(backupVersion int, paths []path.Path) ([]path.Path, err
                 break
             }
 
-            colPaths[np.String()] = np
-            p = np
+            if len(p.RestorePath.Elements()) < len(sp.Elements()) {
+                return nil, clues.New("restorePath shorter than storagePath").
+                    With("restore_path", p.RestorePath, "storage_path", sp)
+            }
+
+            rp := p.RestorePath
+
+            // Make sure the RestorePath always points to the level of the current
+            // collection. We need to track if it's the first iteration because the
+            // RestorePath starts out at the collection level to begin with.
+            if !first {
+                rp, err = p.RestorePath.Dir()
+                if err != nil {
+                    return nil, err
+                }
+            }
+
+            paths := path.RestorePaths{
+                StoragePath: sp,
+                RestorePath: rp,
+            }
+
+            colPaths[sp.String()] = paths
+            p = paths
+            first = false
         }
     }
 
@@ -814,32 +849,45 @@ func AugmentRestorePaths(backupVersion int, paths []path.Path) ([]path.Path, err
     // As of now look up metadata for parent directories from a
     // collection.
     for _, p := range colPaths {
-        el := p.Elements()
+        el := p.StoragePath.Elements()
 
         if backupVersion >= version.OneDrive6NameInMeta {
-            mPath, err := p.Append(".dirmeta", true)
+            mPath, err := p.StoragePath.AppendItem(".dirmeta")
             if err != nil {
                 return nil, err
             }
 
-            paths = append(paths, mPath)
+            paths = append(
+                paths,
+                path.RestorePaths{StoragePath: mPath, RestorePath: p.RestorePath})
        } else if backupVersion >= version.OneDrive4DirIncludesPermissions {
-            mPath, err := p.Append(el[len(el)-1]+".dirmeta", true)
+            mPath, err := p.StoragePath.AppendItem(el.Last() + ".dirmeta")
             if err != nil {
                 return nil, err
             }
 
-            paths = append(paths, mPath)
+            paths = append(
+                paths,
+                path.RestorePaths{StoragePath: mPath, RestorePath: p.RestorePath})
        } else if backupVersion >= version.OneDrive1DataAndMetaFiles {
-            pp, err := p.Dir()
+            pp, err := p.StoragePath.Dir()
             if err != nil {
                 return nil, err
             }
-            mPath, err := pp.Append(el[len(el)-1]+".dirmeta", true)
+
+            mPath, err := pp.AppendItem(el.Last() + ".dirmeta")
             if err != nil {
                 return nil, err
             }
-            paths = append(paths, mPath)
+
+            prp, err := p.RestorePath.Dir()
+            if err != nil {
+                return nil, err
+            }
+
+            paths = append(
+                paths,
+                path.RestorePaths{StoragePath: mPath, RestorePath: prp})
         }
     }
 
@@ -847,8 +895,11 @@ func AugmentRestorePaths(backupVersion int, paths []path.Path) ([]path.Path, err
     // files. This is only a necessity for OneDrive as we are storing
     // metadata for files/folders in separate meta files and we the
     // data to be restored before we can restore the metadata.
+    //
+    // This sorting assumes stuff in the same StoragePath directory end up in the
+    // same RestorePath collection.
     sort.Slice(paths, func(i, j int) bool {
-        return paths[i].String() < paths[j].String()
+        return paths[i].StoragePath.String() < paths[j].StoragePath.String()
     })
 
     return paths, nil
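The StoragePath/RestorePath split lets data stored under immutable folder IDs be restored under human-readable folder names, and the sort at the end of AugmentRestorePaths is what guarantees each .dirmeta entry is restored before the files in the same directory. A self-contained sketch of the pairing and the ordering effect; the struct below is a stand-in for the repo's path.RestorePaths:

package main

import (
    "fmt"
    "sort"
)

// restorePaths is a stand-in for this repo's path.RestorePaths pair.
type restorePaths struct {
    storage string // where bytes live in the backup (eg: keyed by folder ID)
    restore string // where they should land on restore (eg: folder names)
}

func main() {
    paths := []restorePaths{
        {storage: "drives/d/root:/folder-id/file.txt.data", restore: "root:/folder"},
        {storage: "drives/d/root:/folder-id/.dirmeta", restore: "root:/folder"},
    }

    // Sorting by storage path places ".dirmeta" before "file.txt.data"
    // ('.' sorts ahead of letters), so directory metadata is restored
    // before the directory's files.
    sort.Slice(paths, func(i, j int) bool {
        return paths[i].storage < paths[j].storage
    })

    for _, p := range paths {
        fmt.Printf("%s -> %s\n", p.storage, p.restore)
    }
}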
@@ -172,20 +172,30 @@ func (suite *RestoreUnitSuite) TestAugmentRestorePaths() {
 
     base := "id/onedrive/user/files/drives/driveID/root:/"
 
-    inPaths := []path.Path{}
+    inPaths := []path.RestorePaths{}
     for _, ps := range test.input {
         p, err := path.FromDataLayerPath(base+ps, true)
         require.NoError(t, err, "creating path", clues.ToCore(err))
 
-        inPaths = append(inPaths, p)
+        pd, err := p.Dir()
+        require.NoError(t, err, "creating collection path", clues.ToCore(err))
+
+        inPaths = append(
+            inPaths,
+            path.RestorePaths{StoragePath: p, RestorePath: pd})
     }
 
-    outPaths := []path.Path{}
+    outPaths := []path.RestorePaths{}
     for _, ps := range test.output {
         p, err := path.FromDataLayerPath(base+ps, true)
         require.NoError(t, err, "creating path", clues.ToCore(err))
 
-        outPaths = append(outPaths, p)
+        pd, err := p.Dir()
+        require.NoError(t, err, "creating collection path", clues.ToCore(err))
+
+        outPaths = append(
+            outPaths,
+            path.RestorePaths{StoragePath: p, RestorePath: pd})
     }
 
     actual, err := AugmentRestorePaths(test.version, inPaths)
@@ -197,3 +207,111 @@ func (suite *RestoreUnitSuite) TestAugmentRestorePaths() {
         })
     }
 }
+
+// TestAugmentRestorePaths_DifferentRestorePath tests that RestorePath
+// substitution works properly. Since it's only possible for future backup
+// versions to need restore path substitution (i.e. due to storing folders by
+// ID instead of name) this is only tested against the most recent backup
+// version at the moment.
+func (suite *RestoreUnitSuite) TestAugmentRestorePaths_DifferentRestorePath() {
+    // Adding a simple test here so that we can be sure that this
+    // function gets updated whenever we add a new version.
+    require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version")
+
+    type pathPair struct {
+        storage string
+        restore string
+    }
+
+    table := []struct {
+        name     string
+        version  int
+        input    []pathPair
+        output   []pathPair
+        errCheck assert.ErrorAssertionFunc
+    }{
+        {
+            name:    "nested folders",
+            version: version.Backup,
+            input: []pathPair{
+                {storage: "folder-id/file.txt.data", restore: "folder"},
+                {storage: "folder-id/folder2-id/file.txt.data", restore: "folder/folder2"},
+            },
+            output: []pathPair{
+                {storage: "folder-id/.dirmeta", restore: "folder"},
+                {storage: "folder-id/file.txt.data", restore: "folder"},
+                {storage: "folder-id/folder2-id/.dirmeta", restore: "folder/folder2"},
+                {storage: "folder-id/folder2-id/file.txt.data", restore: "folder/folder2"},
+            },
+            errCheck: assert.NoError,
+        },
+        {
+            name:    "restore path longer one folder",
+            version: version.Backup,
+            input: []pathPair{
+                {storage: "folder-id/file.txt.data", restore: "corso_restore/folder"},
+            },
+            output: []pathPair{
+                {storage: "folder-id/.dirmeta", restore: "corso_restore/folder"},
+                {storage: "folder-id/file.txt.data", restore: "corso_restore/folder"},
+            },
+            errCheck: assert.NoError,
+        },
+        {
+            name:    "restore path shorter one folder",
+            version: version.Backup,
+            input: []pathPair{
+                {storage: "folder-id/file.txt.data", restore: ""},
+            },
+            errCheck: assert.Error,
+        },
+    }
+
+    for _, test := range table {
+        suite.Run(test.name, func() {
+            t := suite.T()
+
+            _, flush := tester.NewContext()
+            defer flush()
+
+            base := "id/onedrive/user/files/drives/driveID/root:/"
+
+            inPaths := []path.RestorePaths{}
+            for _, ps := range test.input {
+                p, err := path.FromDataLayerPath(base+ps.storage, true)
+                require.NoError(t, err, "creating path", clues.ToCore(err))
+
+                r, err := path.FromDataLayerPath(base+ps.restore, false)
+                require.NoError(t, err, "creating path", clues.ToCore(err))
+
+                inPaths = append(
+                    inPaths,
+                    path.RestorePaths{StoragePath: p, RestorePath: r})
+            }
+
+            outPaths := []path.RestorePaths{}
+            for _, ps := range test.output {
+                p, err := path.FromDataLayerPath(base+ps.storage, true)
+                require.NoError(t, err, "creating path", clues.ToCore(err))
+
+                r, err := path.FromDataLayerPath(base+ps.restore, false)
+                require.NoError(t, err, "creating path", clues.ToCore(err))
+
+                outPaths = append(
+                    outPaths,
+                    path.RestorePaths{StoragePath: p, RestorePath: r})
+            }
+
+            actual, err := AugmentRestorePaths(test.version, inPaths)
+            test.errCheck(t, err, "augmenting paths", clues.ToCore(err))
+
+            if err != nil {
+                return
+            }
+
+            // Ordering of paths matter here as we need dirmeta files
+            // to show up before file in dir
+            assert.Equal(t, outPaths, actual, "augmented paths")
+        })
+    }
+}
@@ -10,7 +10,6 @@ import (
     "github.com/stretchr/testify/require"
     "github.com/stretchr/testify/suite"
 
-    "github.com/alcionai/corso/src/internal/common/dttm"
     "github.com/alcionai/corso/src/internal/connector/sharepoint"
     "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
     spMock "github.com/alcionai/corso/src/internal/connector/sharepoint/mock"
@@ -81,7 +80,7 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
 
     t := suite.T()
 
-    destName := "Corso_Restore_" + dttm.FormatNow(dttm.SafeForTesting)
+    destName := tester.DefaultTestRestoreDestination("").ContainerName
     testName := "MockPage"
 
     // Create Test Page
@@ -98,8 +97,7 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
         suite.service,
         pageData,
         suite.siteID,
-        destName,
-    )
+        destName)
 
     require.NoError(t, err, clues.ToCore(err))
     require.NotNil(t, info)
@@ -12,7 +12,6 @@ import (
     "github.com/stretchr/testify/require"
     "github.com/stretchr/testify/suite"
 
-    "github.com/alcionai/corso/src/internal/common/dttm"
     "github.com/alcionai/corso/src/internal/common/ptr"
     "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
     spMock "github.com/alcionai/corso/src/internal/connector/sharepoint/mock"
@@ -193,7 +192,7 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
         info: sharePointListInfo(listing, int64(len(byteArray))),
     }
 
-    destName := "Corso_Restore_" + dttm.FormatNow(dttm.SafeForTesting)
+    destName := tester.DefaultTestRestoreDestination("").ContainerName
 
     deets, err := restoreListItem(ctx, service, listData, suite.siteID, destName)
     assert.NoError(t, err, clues.ToCore(err))
@@ -125,7 +125,7 @@ func RestoreCollections(
 }
 
 // restoreListItem utility function restores a List to the siteID.
-// The name is changed to to Corso_Restore_{timeStame}_name
+// The name is changed to to {DestName}_{name}
 // API Reference: https://learn.microsoft.com/en-us/graph/api/list-create?view=graph-rest-1.0&tabs=http
 // Restored List can be verified within the Site contents.
 func restoreListItem(
@@ -172,7 +172,11 @@ func restoreListItem(
     newList.SetItems(contents)
 
     // Restore to List base to M365 back store
-    restoredList, err := service.Client().SitesById(siteID).Lists().Post(ctx, newList, nil)
+    restoredList, err := service.
+        Client().
+        SitesById(siteID).
+        Lists().
+        Post(ctx, newList, nil)
     if err != nil {
         return dii, graph.Wrap(ctx, err, "restoring list")
     }
@@ -247,7 +251,7 @@ func RestoreListCollection(
 
     metrics.Bytes += itemInfo.SharePoint.Size
 
-    itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
+    itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
     if err != nil {
         el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
         continue
@@ -335,7 +339,7 @@ func RestorePageCollection(
 
     metrics.Bytes += itemInfo.SharePoint.Size
 
-    itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
+    itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
     if err != nil {
         el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx))
         continue
@@ -188,10 +188,12 @@ func tenantHash(tenID string) string {
 // metrics aggregation
 // ---------------------------------------------------------------------------
 
-type m string
+type metricsCategory string
 
 // metrics collection bucket
-const APICall m = "api_call"
+const (
+    APICall metricsCategory = "api_call"
+)
 
 // configurations
 const (
@@ -256,13 +258,19 @@ func dumpMetrics(ctx context.Context, stop <-chan struct{}, sig *metrics.InmemSi
 }
 
 // Inc increments the given category by 1.
-func Inc(cat m, keys ...string) {
+func Inc(cat metricsCategory, keys ...string) {
     cats := append([]string{string(cat)}, keys...)
     metrics.IncrCounter(cats, 1)
 }
 
+// IncN increments the given category by N.
+func IncN(n int, cat metricsCategory, keys ...string) {
+    cats := append([]string{string(cat)}, keys...)
+    metrics.IncrCounter(cats, float32(n))
+}
+
 // Since records the duration between the provided time and now, in millis.
-func Since(start time.Time, cat m, keys ...string) {
+func Since(start time.Time, cat metricsCategory, keys ...string) {
     cats := append([]string{string(cat)}, keys...)
     metrics.MeasureSince(cats, start)
 }
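Inc, IncN, and Since are thin wrappers over the go-metrics counters, with the category plus any trailing keys forming the metric name. A self-contained sketch of the IncN shape; the armon/go-metrics import path and the key names are assumptions, not taken from this commit:

package main

import (
    "time"

    "github.com/armon/go-metrics"
)

type metricsCategory string

const apiCall metricsCategory = "api_call"

// incN mirrors the IncN wrapper added above: the category plus keys form the
// metric name, and the count is cast to go-metrics' float32 counter type.
func incN(n int, cat metricsCategory, keys ...string) {
    cats := append([]string{string(cat)}, keys...)
    metrics.IncrCounter(cats, float32(n))
}

func main() {
    start := time.Now()

    incN(25, apiCall, "list_drive") // api_call.list_drive += 25
    metrics.MeasureSince([]string{string(apiCall), "latency"}, start)
}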
112
src/internal/kopia/merge_collection.go
Normal file
@@ -0,0 +1,112 @@
+package kopia
+
+import (
+    "context"
+    "errors"
+
+    "github.com/alcionai/clues"
+    "golang.org/x/exp/slices"
+
+    "github.com/alcionai/corso/src/internal/data"
+    "github.com/alcionai/corso/src/pkg/fault"
+    "github.com/alcionai/corso/src/pkg/logger"
+    "github.com/alcionai/corso/src/pkg/path"
+)
+
+var _ data.RestoreCollection = &mergeCollection{}
+
+type col struct {
+    storagePath string
+    data.RestoreCollection
+}
+
+type mergeCollection struct {
+    cols []col
+    // Technically don't need to track this but it can help detect errors.
+    fullPath path.Path
+}
+
+func (mc *mergeCollection) addCollection(
+    storagePath string,
+    c data.RestoreCollection,
+) error {
+    if c == nil {
+        return clues.New("adding nil collection").
+            With("current_path", mc.FullPath())
+    } else if mc.FullPath().String() != c.FullPath().String() {
+        return clues.New("attempting to merge collection with different path").
+            With("current_path", mc.FullPath(), "new_path", c.FullPath())
+    }
+
+    mc.cols = append(mc.cols, col{storagePath: storagePath, RestoreCollection: c})
+
+    // Keep a stable sorting of this merged collection set so we can say there's
+    // some deterministic behavior when Fetch is called. We don't expect to have
+    // to merge many collections.
+    slices.SortStableFunc(mc.cols, func(a, b col) bool {
+        return a.storagePath < b.storagePath
+    })
+
+    return nil
+}
+
+func (mc mergeCollection) FullPath() path.Path {
+    return mc.fullPath
+}
+
+func (mc *mergeCollection) Items(
+    ctx context.Context,
+    errs *fault.Bus,
+) <-chan data.Stream {
+    res := make(chan data.Stream)
+
+    go func() {
+        defer close(res)
+
+        logger.Ctx(ctx).Infow(
+            "getting items for merged collection",
+            "merged_collection_count", len(mc.cols))
+
+        for _, c := range mc.cols {
+            // Unfortunately doesn't seem to be a way right now to see if the
+            // iteration failed and we should be exiting early.
+            ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath)
+            logger.Ctx(ictx).Debug("sending items from merged collection")
+
+            for item := range c.Items(ictx, errs) {
+                res <- item
+            }
+        }
+    }()
+
+    return res
+}
+
+// Fetch goes through all the collections in this one and returns the first
+// match found or the first error that is not data.ErrNotFound. If multiple
+// collections have the requested item, the instance in the collection with the
+// lexicographically smallest storage path is returned.
+func (mc *mergeCollection) Fetch(
+    ctx context.Context,
+    name string,
+) (data.Stream, error) {
+    logger.Ctx(ctx).Infow(
+        "fetching item in merged collection",
+        "merged_collection_count", len(mc.cols))
+
+    for _, c := range mc.cols {
+        ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath)
+
+        logger.Ctx(ictx).Debug("looking for item in merged collection")
+
+        s, err := c.Fetch(ictx, name)
+        if err == nil {
+            return s, nil
+        } else if err != nil && !errors.Is(err, data.ErrNotFound) {
+            return nil, clues.Wrap(err, "fetching from merged collection").
+                WithClues(ictx)
+        }
+    }
+
+    return nil, clues.Wrap(data.ErrNotFound, "merged collection fetch")
+}
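mergeCollection presents several kopia-backed collections that share one logical FullPath as a single RestoreCollection; addCollection keeps them sorted by storage path so Fetch resolves duplicate item names deterministically. A sketch of the intended wiring, assuming package kopia (so its imports are in scope) and two collections c1 and c2 that report the same FullPath; the storage path strings are placeholders:

// Sketch, not repo code: merging two same-path collections and reading back.
func exampleMerge(
    ctx context.Context,
    p path.Path,
    c1, c2 data.RestoreCollection,
) error {
    mc := &mergeCollection{fullPath: p}

    // addCollection rejects nil collections and mismatched FullPaths, and
    // keeps the set sorted by storage path for deterministic Fetch results.
    if err := mc.addCollection("repo/storage/path1", c1); err != nil {
        return err
    }

    if err := mc.addCollection("repo/storage/path2", c2); err != nil {
        return err
    }

    // Items streams both collections in storage-path order.
    for item := range mc.Items(ctx, fault.New(true)) {
        _ = item.UUID()
    }

    // Fetch returns the copy from the lexicographically smallest storage
    // path when more than one collection holds the named item.
    _, err := mc.Fetch(ctx, "file1")

    return err
}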
297
src/internal/kopia/merge_collection_test.go
Normal file
297
src/internal/kopia/merge_collection_test.go
Normal file
@ -0,0 +1,297 @@
package kopia

import (
	"bytes"
	"io"
	"testing"

	"github.com/alcionai/clues"
	"github.com/kopia/kopia/fs"
	"github.com/kopia/kopia/fs/virtualfs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/connector/exchange/mock"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

type MergeCollectionUnitSuite struct {
	tester.Suite
}

func TestMergeCollectionUnitSuite(t *testing.T) {
	suite.Run(t, &MergeCollectionUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *MergeCollectionUnitSuite) TestReturnsPath() {
	t := suite.T()

	pth, err := path.Build(
		"a-tenant",
		"a-user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"some", "path", "for", "data")
	require.NoError(t, err, clues.ToCore(err))

	c := mergeCollection{
		fullPath: pth,
	}

	assert.Equal(t, pth, c.FullPath())
}

func (suite *MergeCollectionUnitSuite) TestItems() {
	ctx, flush := tester.NewContext()
	defer flush()

	t := suite.T()
	storagePaths := []string{
		"tenant-id/exchange/user-id/mail/some/folder/path1",
		"tenant-id/exchange/user-id/mail/some/folder/path2",
	}

	expectedItemNames := []string{"1", "2"}

	pth, err := path.Build(
		"a-tenant",
		"a-user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"some", "path", "for", "data")
	require.NoError(t, err, clues.ToCore(err))

	c1 := mock.NewCollection(pth, nil, 1)
	c1.Names[0] = expectedItemNames[0]

	c2 := mock.NewCollection(pth, nil, 1)
	c2.Names[0] = expectedItemNames[1]

	// Not testing fetch here so safe to use this wrapper.
	cols := []data.RestoreCollection{
		data.NotFoundRestoreCollection{Collection: c1},
		data.NotFoundRestoreCollection{Collection: c2},
	}

	dc := &mergeCollection{fullPath: pth}

	for i, c := range cols {
		err := dc.addCollection(storagePaths[i], c)
		require.NoError(t, err, "adding collection", clues.ToCore(err))
	}

	gotItemNames := []string{}

	for item := range dc.Items(ctx, fault.New(true)) {
		gotItemNames = append(gotItemNames, item.UUID())
	}

	assert.ElementsMatch(t, expectedItemNames, gotItemNames)
}

func (suite *MergeCollectionUnitSuite) TestAddCollection_DifferentPathFails() {
	t := suite.T()

	pth1, err := path.Build(
		"a-tenant",
		"a-user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"some", "path", "for", "data")
	require.NoError(t, err, clues.ToCore(err))

	pth2, err := path.Build(
		"a-tenant",
		"a-user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"some", "path", "for", "data2")
	require.NoError(t, err, clues.ToCore(err))

	dc := mergeCollection{fullPath: pth1}

	err = dc.addCollection("some/path", &kopiaDataCollection{path: pth2})
	assert.Error(t, err, clues.ToCore(err))
}

func (suite *MergeCollectionUnitSuite) TestFetch() {
	var (
		fileData1 = []byte("abcdefghijklmnopqrstuvwxyz")
		fileData2 = []byte("zyxwvutsrqponmlkjihgfedcba")
		fileData3 = []byte("foo bar baz")

		fileName1         = "file1"
		fileName2         = "file2"
		fileLookupErrName = "errLookup"
		fileOpenErrName   = "errOpen"

		colPaths = []string{
			"tenant-id/exchange/user-id/mail/some/data/directory1",
			"tenant-id/exchange/user-id/mail/some/data/directory2",
		}
	)

	pth, err := path.Build(
		"a-tenant",
		"a-user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"some", "path", "for", "data")
	require.NoError(suite.T(), err, clues.ToCore(err))

	// Needs to be a function so the readers get refreshed each time.
	layouts := []func() fs.Directory{
		// Has the following:
		// - file1: data[0]
		// - errOpen: (error opening file)
		func() fs.Directory {
			return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{
				&mockFile{
					StreamingFile: virtualfs.StreamingFileFromReader(
						encodeAsPath(fileName1),
						nil,
					),
					r: newBackupStreamReader(
						serializationVersion,
						io.NopCloser(bytes.NewReader(fileData1)),
					),
					size: int64(len(fileData1) + versionSize),
				},
				&mockFile{
					StreamingFile: virtualfs.StreamingFileFromReader(
						encodeAsPath(fileOpenErrName),
						nil,
					),
					openErr: assert.AnError,
				},
			})
		},

		// Has the following:
		// - file1: data[1]
		// - file2: data[0]
		// - errOpen: data[2]
		func() fs.Directory {
			return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{
				&mockFile{
					StreamingFile: virtualfs.StreamingFileFromReader(
						encodeAsPath(fileName1),
						nil,
					),
					r: newBackupStreamReader(
						serializationVersion,
						io.NopCloser(bytes.NewReader(fileData2)),
					),
					size: int64(len(fileData2) + versionSize),
				},
				&mockFile{
					StreamingFile: virtualfs.StreamingFileFromReader(
						encodeAsPath(fileName2),
						nil,
					),
					r: newBackupStreamReader(
						serializationVersion,
						io.NopCloser(bytes.NewReader(fileData1)),
					),
					size: int64(len(fileData1) + versionSize),
				},
				&mockFile{
					StreamingFile: virtualfs.StreamingFileFromReader(
						encodeAsPath(fileOpenErrName),
						nil,
					),
					r: newBackupStreamReader(
						serializationVersion,
						io.NopCloser(bytes.NewReader(fileData3)),
					),
					size: int64(len(fileData3) + versionSize),
				},
			})
		},
	}

	table := []struct {
		name        string
		fileName    string
		expectError assert.ErrorAssertionFunc
		expectData  []byte
		notFoundErr bool
	}{
		{
			name:        "Duplicate File, first collection",
			fileName:    fileName1,
			expectError: assert.NoError,
			expectData:  fileData1,
		},
		{
			name:        "Distinct File, second collection",
			fileName:    fileName2,
			expectError: assert.NoError,
			expectData:  fileData1,
		},
		{
			name:        "Error opening file",
			fileName:    fileOpenErrName,
			expectError: assert.Error,
		},
		{
			name:        "File not found",
			fileName:    fileLookupErrName,
			expectError: assert.Error,
			notFoundErr: true,
		},
	}

	for _, test := range table {
		suite.Run(test.name, func() {
			ctx, flush := tester.NewContext()
			defer flush()

			t := suite.T()
			c := &i64counter{}

			dc := mergeCollection{fullPath: pth}

			for i, layout := range layouts {
				col := &kopiaDataCollection{
					path:            pth,
					dir:             layout(),
					counter:         c,
					expectedVersion: serializationVersion,
				}

				err := dc.addCollection(colPaths[i], col)
				require.NoError(t, err, "adding collection", clues.ToCore(err))
			}

			s, err := dc.Fetch(ctx, test.fileName)
			test.expectError(t, err, clues.ToCore(err))

			if err != nil {
				if test.notFoundErr {
					assert.ErrorIs(t, err, data.ErrNotFound, clues.ToCore(err))
				}

				return
			}

			fileData, err := io.ReadAll(s.ToReader())
			require.NoError(t, err, "reading file data", clues.ToCore(err))

			if err != nil {
				return
			}

			assert.Equal(t, test.expectData, fileData)
		})
	}
}
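One note on TestItems: the data.NotFoundRestoreCollection wrapper is safe there precisely because Fetch is never called. Sketched below is what the wrapper is assumed to look like; this is an inference from its name and usage, not the actual corso source.

// Assumed shape of data.NotFoundRestoreCollection (sketch only): it wraps a
// plain data.Collection and satisfies the RestoreCollection interface by
// treating every fetched name as missing.
type notFoundRestoreCollection struct {
	data.Collection
}

func (c notFoundRestoreCollection) Fetch(
	ctx context.Context,
	name string,
) (data.Stream, error) {
	return nil, data.ErrNotFound
}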
@ -31,6 +31,10 @@ func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error)
 		Prefix:         cfg.Prefix,
 		DoNotUseTLS:    cfg.DoNotUseTLS,
 		DoNotVerifyTLS: cfg.DoNotVerifyTLS,
+		Tags:           s.SessionTags,
+		SessionName:    s.SessionName,
+		RoleARN:        s.Role,
+		RoleDuration:   s.SessionDuration,
 	}

 	store, err := s3.New(ctx, &opts, false)
@ -347,7 +347,7 @@ func collectionEntries(
 			seen[encodedName] = struct{}{}

 			// For now assuming that item IDs don't need escaping.
-			itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true)
+			itemPath, err := streamedEnts.FullPath().AppendItem(e.UUID())
 			if err != nil {
 				err = clues.Wrap(err, "getting full item path")
 				progress.errs.AddRecoverable(err)
@ -464,7 +464,7 @@ func streamBaseEntries(
 		}

 		// For now assuming that item IDs don't need escaping.
-		itemPath, err := curPath.Append(entName, true)
+		itemPath, err := curPath.AppendItem(entName)
 		if err != nil {
 			return clues.Wrap(err, "getting full item path for base entry")
 		}
@ -473,7 +473,7 @@ func streamBaseEntries(
 		// backup details. If the item moved and we had only the new path, we'd be
 		// unable to find it in the old backup details because we wouldn't know what
 		// to look for.
-		prevItemPath, err := prevPath.Append(entName, true)
+		prevItemPath, err := prevPath.AppendItem(entName)
 		if err != nil {
 			return clues.Wrap(err, "getting previous full item path for base entry")
 		}
@ -365,6 +365,11 @@ type ByteCounter interface {
 	Count(numBytes int64)
 }

+type restoreCollection struct {
+	restorePath path.Path
+	storageDirs map[string]*dirAndItems
+}
+
 type dirAndItems struct {
 	dir   path.Path
 	items []string
@ -380,7 +385,7 @@ func loadDirsAndItems(
 	ctx context.Context,
 	snapshotRoot fs.Entry,
 	bcounter ByteCounter,
-	toLoad map[string]*dirAndItems,
+	toLoad map[string]*restoreCollection,
 	bus *fault.Bus,
 ) ([]data.RestoreCollection, error) {
 	var (
@ -389,50 +394,67 @@ func loadDirsAndItems(
 		loadCount = 0
 	)

-	for _, dirItems := range toLoad {
+	for _, col := range toLoad {
 		if el.Failure() != nil {
 			return nil, el.Failure()
 		}

-		ictx := clues.Add(ctx, "directory_path", dirItems.dir)
+		ictx := clues.Add(ctx, "restore_path", col.restorePath)

-		dir, err := getDir(ictx, dirItems.dir, snapshotRoot)
-		if err != nil {
-			el.AddRecoverable(clues.Wrap(err, "loading directory").
-				WithClues(ictx).
-				Label(fault.LabelForceNoBackupCreation))
-
-			continue
-		}
-
-		dc := &kopiaDataCollection{
-			path:            dirItems.dir,
-			dir:             dir,
-			counter:         bcounter,
-			expectedVersion: serializationVersion,
-		}
-
-		res = append(res, dc)
-
-		for _, item := range dirItems.items {
+		mergeCol := &mergeCollection{fullPath: col.restorePath}
+		res = append(res, mergeCol)
+
+		for _, dirItems := range col.storageDirs {
 			if el.Failure() != nil {
 				return nil, el.Failure()
 			}

-			err := dc.addStream(ictx, item)
+			ictx = clues.Add(ictx, "storage_directory_path", dirItems.dir)
+
+			dir, err := getDir(ictx, dirItems.dir, snapshotRoot)
 			if err != nil {
-				el.AddRecoverable(clues.Wrap(err, "loading item").
+				el.AddRecoverable(clues.Wrap(err, "loading storage directory").
 					WithClues(ictx).
 					Label(fault.LabelForceNoBackupCreation))

 				continue
 			}

-			loadCount++
-			if loadCount%1000 == 0 {
-				logger.Ctx(ctx).Infow(
-					"loading items from kopia",
-					"loaded_items", loadCount)
+			dc := &kopiaDataCollection{
+				path:            col.restorePath,
+				dir:             dir,
+				counter:         bcounter,
+				expectedVersion: serializationVersion,
+			}
+
+			if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {
+				el.AddRecoverable(clues.Wrap(err, "adding collection to merge collection").
+					WithClues(ctx).
+					Label(fault.LabelForceNoBackupCreation))
+
+				continue
+			}
+
+			for _, item := range dirItems.items {
+				if el.Failure() != nil {
+					return nil, el.Failure()
+				}
+
+				err := dc.addStream(ictx, item)
+				if err != nil {
+					el.AddRecoverable(clues.Wrap(err, "loading item").
+						WithClues(ictx).
+						Label(fault.LabelForceNoBackupCreation))
+
+					continue
+				}
+
+				loadCount++
+				if loadCount%1000 == 0 {
+					logger.Ctx(ctx).Infow(
+						"loading items from kopia",
+						"loaded_items", loadCount)
+				}
 			}
 		}
 	}
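The rewritten loop consumes a two-level grouping: one mergeCollection per restore destination, and one kopiaDataCollection per kopia storage directory feeding it. A sketch of the input shape follows; the map keys are path ShortRefs in the real code, and the values here (inboxRestorePath, mailDir1, mailDir2) are hypothetical stand-ins.

// Hypothetical toLoad contents, matching the restoreCollection and
// dirAndItems types added above.
toLoad := map[string]*restoreCollection{
	"restore-ref-1": {
		restorePath: inboxRestorePath, // produces a single mergeCollection
		storageDirs: map[string]*dirAndItems{
			"storage-ref-a": {dir: mailDir1, items: []string{"id1", "id2"}},
			"storage-ref-b": {dir: mailDir2, items: []string{"id3"}},
		},
	},
}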
@ -454,7 +476,7 @@ func loadDirsAndItems(
 func (w Wrapper) ProduceRestoreCollections(
 	ctx context.Context,
 	snapshotID string,
-	paths []path.Path,
+	paths []path.RestorePaths,
 	bcounter ByteCounter,
 	errs *fault.Bus,
 ) ([]data.RestoreCollection, error) {
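path.RestorePaths is the new unit of work: StoragePath locates the item inside the snapshot, RestorePath names the directory it should land in, and the two no longer need to share a parent. A minimal sketch using only calls that appear elsewhere in this diff; the literal values and helper name are illustrative.

// exampleRestorePaths builds an item's storage path and pairs it with a
// restore directory. Reusing the item's original parent reproduces the old
// behavior; any other directory path would also be valid.
func exampleRestorePaths() (path.RestorePaths, error) {
	dir, err := path.Build(
		"a-tenant",
		"a-user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"some", "folder")
	if err != nil {
		return path.RestorePaths{}, err
	}

	storagePath, err := dir.AppendItem("item-id")
	if err != nil {
		return path.RestorePaths{}, err
	}

	return path.RestorePaths{StoragePath: storagePath, RestorePath: dir}, nil
}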
@ -474,36 +496,53 @@ func (w Wrapper) ProduceRestoreCollections(

 	var (
 		loadCount int
-		// Directory path -> set of items to load from the directory.
-		dirsToItems = map[string]*dirAndItems{}
+		// RestorePath -> []StoragePath directory -> set of items to load from the
+		// directory.
+		dirsToItems = map[string]*restoreCollection{}
 		el          = errs.Local()
 	)

-	for _, itemPath := range paths {
+	for _, itemPaths := range paths {
 		if el.Failure() != nil {
 			return nil, el.Failure()
 		}

-		// Group things by directory so we can load all items from a single
-		// directory instance lower down.
-		ictx := clues.Add(ctx, "item_path", itemPath.String())
+		// Group things by RestorePath and then StoragePath so we can load multiple
+		// items from a single directory instance lower down.
+		ictx := clues.Add(
+			ctx,
+			"item_path", itemPaths.StoragePath.String(),
+			"restore_path", itemPaths.RestorePath.String())

-		parentPath, err := itemPath.Dir()
+		parentStoragePath, err := itemPaths.StoragePath.Dir()
 		if err != nil {
-			el.AddRecoverable(clues.Wrap(err, "making directory collection").
+			el.AddRecoverable(clues.Wrap(err, "getting storage directory path").
 				WithClues(ictx).
 				Label(fault.LabelForceNoBackupCreation))

 			continue
 		}

-		di := dirsToItems[parentPath.ShortRef()]
-		if di == nil {
-			dirsToItems[parentPath.ShortRef()] = &dirAndItems{dir: parentPath}
-			di = dirsToItems[parentPath.ShortRef()]
+		// Find the location this item is restored to.
+		rc := dirsToItems[itemPaths.RestorePath.ShortRef()]
+		if rc == nil {
+			dirsToItems[itemPaths.RestorePath.ShortRef()] = &restoreCollection{
+				restorePath: itemPaths.RestorePath,
+				storageDirs: map[string]*dirAndItems{},
+			}
+			rc = dirsToItems[itemPaths.RestorePath.ShortRef()]
 		}

-		di.items = append(di.items, itemPath.Item())
+		// Find the collection this item is sourced from.
+		di := rc.storageDirs[parentStoragePath.ShortRef()]
+		if di == nil {
+			rc.storageDirs[parentStoragePath.ShortRef()] = &dirAndItems{
+				dir: parentStoragePath,
+			}
+			di = rc.storageDirs[parentStoragePath.ShortRef()]
+		}
+
+		di.items = append(di.items, itemPaths.StoragePath.Item())

 		loadCount++
 		if loadCount%1000 == 0 {
@ -59,14 +59,12 @@ var (
 	testFileData6 = testFileData
 )

-//revive:disable:context-as-argument
 func testForFiles(
 	t *testing.T,
-	ctx context.Context,
+	ctx context.Context, //revive:disable-line:context-as-argument
 	expected map[string][]byte,
 	collections []data.RestoreCollection,
 ) {
-	//revive:enable:context-as-argument
 	t.Helper()

 	count := 0
@ -75,7 +73,7 @@ func testForFiles(
 		for s := range c.Items(ctx, fault.New(true)) {
 			count++

-			fullPath, err := c.FullPath().Append(s.UUID(), true)
+			fullPath, err := c.FullPath().AppendItem(s.UUID())
 			require.NoError(t, err, clues.ToCore(err))

 			expected, ok := expected[fullPath.String()]
@ -107,6 +105,19 @@ func checkSnapshotTags(
 	assert.Equal(t, expectedTags, man.Tags)
 }

+func toRestorePaths(t *testing.T, paths ...path.Path) []path.RestorePaths {
+	res := make([]path.RestorePaths, 0, len(paths))
+
+	for _, p := range paths {
+		dir, err := p.Dir()
+		require.NoError(t, err, clues.ToCore(err))
+
+		res = append(res, path.RestorePaths{StoragePath: p, RestorePath: dir})
+	}
+
+	return res
+}
+
 // ---------------
 // unit tests
 // ---------------
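The helper keeps the updated test call sites close to their old form: item paths that used to be passed as []path.Path are wrapped so each item restores into its original parent. For example, mirroring the TestRestoreAfterCompressionChange call updated below:

// Same semantics as the old []path.Path{fp1, fp2} argument: every
// StoragePath is paired with its own parent directory as the RestorePath.
result, err := w.ProduceRestoreCollections(
	ctx,
	string(stats.SnapshotID),
	toRestorePaths(t, fp1, fp2),
	nil,
	fault.New(true))
require.NoError(t, err, clues.ToCore(err))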
@ -678,10 +689,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 	dc1 := exchMock.NewCollection(suite.storePath1, suite.locPath1, 1)
 	dc2 := exchMock.NewCollection(suite.storePath2, suite.locPath2, 1)

-	fp1, err := suite.storePath1.Append(dc1.Names[0], true)
+	fp1, err := suite.storePath1.AppendItem(dc1.Names[0])
 	require.NoError(t, err, clues.ToCore(err))

-	fp2, err := suite.storePath2.Append(dc2.Names[0], true)
+	fp2, err := suite.storePath2.AppendItem(dc2.Names[0])
 	require.NoError(t, err, clues.ToCore(err))

 	stats, _, _, err := w.ConsumeBackupCollections(
@ -705,10 +716,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 	result, err := w.ProduceRestoreCollections(
 		ctx,
 		string(stats.SnapshotID),
-		[]path.Path{
-			fp1,
-			fp2,
-		},
+		toRestorePaths(t, fp1, fp2),
 		nil,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@ -830,7 +838,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 	// 5 file and 2 folder entries.
 	assert.Len(t, deets.Details().Entries, 5+2)

-	failedPath, err := suite.storePath2.Append(testFileName4, true)
+	failedPath, err := suite.storePath2.AppendItem(testFileName4)
 	require.NoError(t, err, clues.ToCore(err))

 	ic := i64counter{}
@ -838,7 +846,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 	_, err = suite.w.ProduceRestoreCollections(
 		suite.ctx,
 		string(stats.SnapshotID),
-		[]path.Path{failedPath},
+		toRestorePaths(t, failedPath),
 		&ic,
 		fault.New(true))
 	// Files that had an error shouldn't make a dir entry in kopia. If they do we
@ -979,7 +987,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupSuite() {
 	}

 	for _, item := range filesInfo {
-		pth, err := item.parentPath.Append(item.name, true)
+		pth, err := item.parentPath.AppendItem(item.name)
 		require.NoError(suite.T(), err, clues.ToCore(err))

 		mapKey := item.parentPath.String()
@ -1219,9 +1227,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 			_, err = suite.w.ProduceRestoreCollections(
 				suite.ctx,
 				string(stats.SnapshotID),
-				[]path.Path{
-					suite.files[suite.testPath1.String()][0].itemPath,
-				},
+				toRestorePaths(t, suite.files[suite.testPath1.String()][0].itemPath),
 				&ic,
 				fault.New(true))
 			test.restoreCheck(t, err, clues.ToCore(err))
@ -1322,7 +1328,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
 			result, err := suite.w.ProduceRestoreCollections(
 				suite.ctx,
 				string(suite.snapshotID),
-				test.inputPaths,
+				toRestorePaths(t, test.inputPaths...),
 				&ic,
 				fault.New(true))
 			test.expectedErr(t, err, clues.ToCore(err))
@ -1338,14 +1344,201 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
 	}
 }

+// TestProduceRestoreCollections_PathChanges tests that having different
+// Restore and Storage paths works properly. Having the same Restore and Storage
+// paths is tested by TestProduceRestoreCollections.
+func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_PathChanges() {
+	rp1, err := path.Build(
+		testTenant,
+		testUser,
+		path.ExchangeService,
+		path.EmailCategory,
+		false,
+		"corso_restore", "Inbox")
+	require.NoError(suite.T(), err)
+
+	rp2, err := path.Build(
+		testTenant,
+		testUser,
+		path.ExchangeService,
+		path.EmailCategory,
+		false,
+		"corso_restore", "Archive")
+	require.NoError(suite.T(), err)
+
+	// Expected items are generated during the test by looking up paths in the
+	// suite's map of files.
+	table := []struct {
+		name                string
+		inputPaths          []path.RestorePaths
+		expectedCollections int
+	}{
+		{
+			name: "SingleItem",
+			inputPaths: []path.RestorePaths{
+				{
+					StoragePath: suite.files[suite.testPath1.String()][0].itemPath,
+					RestorePath: rp1,
+				},
+			},
+			expectedCollections: 1,
+		},
+		{
+			name: "MultipleItemsSameCollection",
+			inputPaths: []path.RestorePaths{
+				{
+					StoragePath: suite.files[suite.testPath1.String()][0].itemPath,
+					RestorePath: rp1,
+				},
+				{
+					StoragePath: suite.files[suite.testPath1.String()][1].itemPath,
+					RestorePath: rp1,
+				},
+			},
+			expectedCollections: 1,
+		},
+		{
+			name: "MultipleItemsDifferentCollections",
+			inputPaths: []path.RestorePaths{
+				{
+					StoragePath: suite.files[suite.testPath1.String()][0].itemPath,
+					RestorePath: rp1,
+				},
+				{
+					StoragePath: suite.files[suite.testPath2.String()][0].itemPath,
+					RestorePath: rp2,
+				},
+			},
+			expectedCollections: 2,
+		},
+		{
+			name: "Multiple Items From Different Collections To Same Collection",
+			inputPaths: []path.RestorePaths{
+				{
+					StoragePath: suite.files[suite.testPath1.String()][0].itemPath,
+					RestorePath: rp1,
+				},
+				{
+					StoragePath: suite.files[suite.testPath2.String()][0].itemPath,
+					RestorePath: rp1,
+				},
+			},
+			expectedCollections: 1,
+		},
+	}
+
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			ctx, flush := tester.NewContext()
+			defer flush()
+
+			t := suite.T()
+			expected := make(map[string][]byte, len(test.inputPaths))
+
+			for _, pth := range test.inputPaths {
+				item, ok := suite.filesByPath[pth.StoragePath.String()]
+				require.True(t, ok, "getting expected file data")
+
+				itemPath, err := pth.RestorePath.AppendItem(pth.StoragePath.Item())
+				require.NoError(t, err, "getting expected item path")
+
+				expected[itemPath.String()] = item.data
+			}
+
+			ic := i64counter{}
+
+			result, err := suite.w.ProduceRestoreCollections(
+				suite.ctx,
+				string(suite.snapshotID),
+				test.inputPaths,
+				&ic,
+				fault.New(true))
+			require.NoError(t, err, clues.ToCore(err))
+
+			assert.Len(t, result, test.expectedCollections)
+			assert.Less(t, int64(0), ic.i)
+			testForFiles(t, ctx, expected, result)
+		})
+	}
+}
+
+// TestProduceRestoreCollections_Fetch tests that the Fetch function still works
+// properly even with different Restore and Storage paths and items from
+// different kopia directories.
+func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Fetch() {
+	ctx, flush := tester.NewContext()
+	defer flush()
+
+	t := suite.T()
+
+	rp1, err := path.Build(
+		testTenant,
+		testUser,
+		path.ExchangeService,
+		path.EmailCategory,
+		false,
+		"corso_restore", "Inbox")
+	require.NoError(suite.T(), err)
+
+	inputPaths := []path.RestorePaths{
+		{
+			StoragePath: suite.files[suite.testPath1.String()][0].itemPath,
+			RestorePath: rp1,
+		},
+		{
+			StoragePath: suite.files[suite.testPath2.String()][0].itemPath,
+			RestorePath: rp1,
+		},
+	}
+
+	// Really only interested in getting the collection so we can call fetch on
+	// it.
+	ic := i64counter{}
+
+	result, err := suite.w.ProduceRestoreCollections(
+		suite.ctx,
+		string(suite.snapshotID),
+		inputPaths,
+		&ic,
+		fault.New(true))
+	require.NoError(t, err, "getting collection", clues.ToCore(err))
+	require.Len(t, result, 1)
+
+	// Item from first kopia directory.
+	f := suite.files[suite.testPath1.String()][0]
+
+	item, err := result[0].Fetch(ctx, f.itemPath.Item())
+	require.NoError(t, err, "fetching file", clues.ToCore(err))
+
+	r := item.ToReader()
+
+	buf, err := io.ReadAll(r)
+	require.NoError(t, err, "reading file data", clues.ToCore(err))
+
+	assert.Equal(t, f.data, buf)
+
+	// Item from second kopia directory.
+	f = suite.files[suite.testPath2.String()][0]
+
+	item, err = result[0].Fetch(ctx, f.itemPath.Item())
+	require.NoError(t, err, "fetching file", clues.ToCore(err))
+
+	r = item.ToReader()
+
+	buf, err = io.ReadAll(r)
+	require.NoError(t, err, "reading file data", clues.ToCore(err))
+
+	assert.Equal(t, f.data, buf)
+}
+
 func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Errors() {
-	itemPath, err := suite.testPath1.Append(testFileName, true)
+	itemPath, err := suite.testPath1.AppendItem(testFileName)
 	require.NoError(suite.T(), err, clues.ToCore(err))

 	table := []struct {
 		name       string
 		snapshotID string
-		paths      []path.Path
+		paths      []path.RestorePaths
 	}{
 		{
 			"NilPaths",
@ -1355,12 +1548,12 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Errors() {
 		{
 			"EmptyPaths",
 			string(suite.snapshotID),
-			[]path.Path{},
+			[]path.RestorePaths{},
 		},
 		{
 			"NoSnapshot",
 			"foo",
-			[]path.Path{itemPath},
+			toRestorePaths(suite.T(), itemPath),
 		},
 	}

@ -1393,7 +1586,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestDeleteSnapshot() {
 	c, err := suite.w.ProduceRestoreCollections(
 		suite.ctx,
 		string(suite.snapshotID),
-		[]path.Path{itemPath},
+		toRestorePaths(t, itemPath),
 		&ic,
 		fault.New(true))
 	assert.Error(t, err, "snapshot should be deleted", clues.ToCore(err))
@ -512,11 +512,16 @@ func consumeBackupCollections(
 			"kopia_ignored_errors", kopiaStats.IgnoredErrorCount)
 	}

-	if kopiaStats.ErrorCount > 0 ||
-		(kopiaStats.IgnoredErrorCount > kopiaStats.ExpectedIgnoredErrorCount) {
-		err = clues.New("building kopia snapshot").With(
-			"kopia_errors", kopiaStats.ErrorCount,
-			"kopia_ignored_errors", kopiaStats.IgnoredErrorCount)
+	ctx = clues.Add(
+		ctx,
+		"kopia_errors", kopiaStats.ErrorCount,
+		"kopia_ignored_errors", kopiaStats.IgnoredErrorCount,
+		"kopia_expected_ignored_errors", kopiaStats.ExpectedIgnoredErrorCount)
+
+	if kopiaStats.ErrorCount > 0 {
+		err = clues.New("building kopia snapshot").WithClues(ctx)
+	} else if kopiaStats.IgnoredErrorCount > kopiaStats.ExpectedIgnoredErrorCount {
+		err = clues.New("downloading items for persistence").WithClues(ctx)
 	}

 	return kopiaStats, deets, itemsSourcedFromBase, err
File diff suppressed because it is too large
@ -16,6 +16,7 @@ import (
 	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/connector/mock"
+	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/data"
 	evmock "github.com/alcionai/corso/src/internal/events/mock"
 	"github.com/alcionai/corso/src/internal/kopia"
@ -46,16 +47,28 @@ type mockRestoreProducer struct {
 	onRestore restoreFunc
 }

-type restoreFunc func(id string, ps []path.Path) ([]data.RestoreCollection, error)
+type restoreFunc func(
+	id string,
+	ps []path.RestorePaths,
+) ([]data.RestoreCollection, error)

 func (mr *mockRestoreProducer) buildRestoreFunc(
 	t *testing.T,
 	oid string,
 	ops []path.Path,
 ) {
-	mr.onRestore = func(id string, ps []path.Path) ([]data.RestoreCollection, error) {
+	mr.onRestore = func(
+		id string,
+		ps []path.RestorePaths,
+	) ([]data.RestoreCollection, error) {
+		gotPaths := make([]path.Path, 0, len(ps))
+
+		for _, rp := range ps {
+			gotPaths = append(gotPaths, rp.StoragePath)
+		}
+
 		assert.Equal(t, oid, id, "manifest id")
-		checkPaths(t, ops, ps)
+		checkPaths(t, ops, gotPaths)

 		return mr.colls, mr.err
 	}
@ -64,11 +77,13 @@ func (mr *mockRestoreProducer) buildRestoreFunc(
 func (mr *mockRestoreProducer) ProduceRestoreCollections(
 	ctx context.Context,
 	snapshotID string,
-	paths []path.Path,
+	paths []path.RestorePaths,
 	bc kopia.ByteCounter,
 	errs *fault.Bus,
 ) ([]data.RestoreCollection, error) {
-	mr.gotPaths = append(mr.gotPaths, paths...)
+	for _, ps := range paths {
+		mr.gotPaths = append(mr.gotPaths, ps.StoragePath)
+	}

 	if mr.onRestore != nil {
 		return mr.onRestore(snapshotID, paths)
@ -643,15 +658,15 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 				path.OneDriveService.String(),
 				ro,
 				path.FilesCategory.String(),
-				"drives",
+				odConsts.DrivesPathDir,
 				"drive-id",
-				"root:",
+				odConsts.RootPathDir,
 				"work",
 				"item1",
 			},
 			true,
 		)
-		locationPath1 = path.Builder{}.Append("root:", "work-display-name")
+		locationPath1 = path.Builder{}.Append(odConsts.RootPathDir, "work-display-name")
 		itemPath2     = makePath(
 			suite.T(),
 			[]string{
@ -659,15 +674,15 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 				path.OneDriveService.String(),
 				ro,
 				path.FilesCategory.String(),
-				"drives",
+				odConsts.DrivesPathDir,
 				"drive-id",
-				"root:",
+				odConsts.RootPathDir,
 				"personal",
 				"item2",
 			},
 			true,
 		)
-		locationPath2 = path.Builder{}.Append("root:", "personal-display-name")
+		locationPath2 = path.Builder{}.Append(odConsts.RootPathDir, "personal-display-name")
 		itemPath3     = makePath(
 			suite.T(),
 			[]string{
@ -47,7 +47,7 @@ type (
 	ProduceRestoreCollections(
 		ctx context.Context,
 		snapshotID string,
-		paths []path.Path,
+		paths []path.RestorePaths,
 		bc kopia.ByteCounter,
 		errs *fault.Bus,
 	) ([]data.RestoreCollection, error)
@ -308,7 +308,7 @@ func collectMetadata(
 	tenantID string,
 	errs *fault.Bus,
 ) ([]data.RestoreCollection, error) {
-	paths := []path.Path{}
+	paths := []path.RestorePaths{}

 	for _, fn := range fileNames {
 		for _, reason := range man.Reasons {
@ -326,7 +326,14 @@ func collectMetadata(
 					With("metadata_file", fn, "category", reason.Category)
 			}

-			paths = append(paths, p)
+			dir, err := p.Dir()
+			if err != nil {
+				return nil, clues.
+					Wrap(err, "building metadata collection path").
+					With("metadata_file", fn, "category", reason.Category)
+			}
+
+			paths = append(paths, path.RestorePaths{StoragePath: p, RestorePath: dir})
 		}
 	}

@ -140,7 +140,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 				ps := make([]path.Path, 0, len(files))

 				for _, f := range files {
-					p, err := emailPath.Append(f, true)
+					p, err := emailPath.AppendItem(f)
 					assert.NoError(t, err, clues.ToCore(err))
 					ps = append(ps, p)
 				}
@ -163,7 +163,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 				ps := make([]path.Path, 0, len(files))

 				for _, f := range files {
-					p, err := emailPath.Append(f, true)
+					p, err := emailPath.AppendItem(f)
 					assert.NoError(t, err, clues.ToCore(err))
 					ps = append(ps, p)
 				}
@ -191,10 +191,10 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 				ps := make([]path.Path, 0, len(files))

 				for _, f := range files {
-					p, err := emailPath.Append(f, true)
+					p, err := emailPath.AppendItem(f)
 					assert.NoError(t, err, clues.ToCore(err))
 					ps = append(ps, p)
-					p, err = contactPath.Append(f, true)
+					p, err = contactPath.AppendItem(f)
 					assert.NoError(t, err, clues.ToCore(err))
 					ps = append(ps, p)
 				}
@ -222,10 +222,10 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 				ps := make([]path.Path, 0, len(files))

 				for _, f := range files {
-					p, err := emailPath.Append(f, true)
+					p, err := emailPath.AppendItem(f)
 					assert.NoError(t, err, clues.ToCore(err))
 					ps = append(ps, p)
-					p, err = contactPath.Append(f, true)
+					p, err = contactPath.AppendItem(f)
 					assert.NoError(t, err, clues.ToCore(err))
 					ps = append(ps, p)
 				}
@ -0,0 +1,181 @@
package pathtransformer

import (
	"context"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/logger"
	"github.com/alcionai/corso/src/pkg/path"
)

func locationRef(
	ent *details.Entry,
	repoRef path.Path,
	backupVersion int,
) (*path.Builder, error) {
	loc := ent.LocationRef

	// At this backup version all data types should populate LocationRef.
	if len(loc) > 0 || backupVersion >= version.OneDrive7LocationRef {
		return path.Builder{}.SplitUnescapeAppend(loc)
	}

	// We could get an empty LocationRef either because it wasn't populated or it
	// was in the root of the data type.
	elems := repoRef.Folders()

	if ent.OneDrive != nil || ent.SharePoint != nil {
		dp, err := path.ToDrivePath(repoRef)
		if err != nil {
			return nil, clues.Wrap(err, "fallback for LocationRef")
		}

		elems = append([]string{dp.Root}, dp.Folders...)
	}

	return path.Builder{}.Append(elems...), nil
}

func basicLocationPath(repoRef path.Path, locRef *path.Builder) (path.Path, error) {
	if len(locRef.Elements()) == 0 {
		res, err := path.ServicePrefix(
			repoRef.Tenant(),
			repoRef.ResourceOwner(),
			repoRef.Service(),
			repoRef.Category())
		if err != nil {
			return nil, clues.Wrap(err, "getting prefix for empty location")
		}

		return res, nil
	}

	return locRef.ToDataLayerPath(
		repoRef.Tenant(),
		repoRef.ResourceOwner(),
		repoRef.Service(),
		repoRef.Category(),
		false)
}

func drivePathMerge(
	ent *details.Entry,
	repoRef path.Path,
	locRef *path.Builder,
) (path.Path, error) {
	// Try getting the drive ID from the item. Not all details versions had it
	// though.
	var driveID string

	if ent.SharePoint != nil {
		driveID = ent.SharePoint.DriveID
	} else if ent.OneDrive != nil {
		driveID = ent.OneDrive.DriveID
	}

	// Fallback to trying to get from RepoRef.
	if len(driveID) == 0 {
		odp, err := path.ToDrivePath(repoRef)
		if err != nil {
			return nil, clues.Wrap(err, "fallback getting DriveID")
		}

		driveID = odp.DriveID
	}

	return basicLocationPath(
		repoRef,
		path.BuildDriveLocation(driveID, locRef.Elements()...))
}

func makeRestorePathsForEntry(
	ctx context.Context,
	backupVersion int,
	ent *details.Entry,
) (path.RestorePaths, error) {
	res := path.RestorePaths{}

	repoRef, err := path.FromDataLayerPath(ent.RepoRef, true)
	if err != nil {
		err = clues.Wrap(err, "parsing RepoRef").
			WithClues(ctx).
			With("repo_ref", clues.Hide(ent.RepoRef), "location_ref", clues.Hide(ent.LocationRef))

		return res, err
	}

	res.StoragePath = repoRef
	ctx = clues.Add(ctx, "repo_ref", repoRef)

	// Get the LocationRef so we can munge it onto our path.
	locRef, err := locationRef(ent, repoRef, backupVersion)
	if err != nil {
		err = clues.Wrap(err, "parsing LocationRef after reduction").
			WithClues(ctx).
			With("location_ref", clues.Hide(ent.LocationRef))

		return res, err
	}

	ctx = clues.Add(ctx, "location_ref", locRef)

	// Now figure out what type of ent it is and munge the path accordingly.
	// Eventually we're going to need munging for:
	//   * Exchange Calendars (different folder handling)
	//   * Exchange Email/Contacts
	//   * OneDrive/SharePoint (needs drive information)
	switch true {
	case ent.Exchange != nil:
		// TODO(ashmrtn): Eventually make Events have its own function to handle
		// setting the restore destination properly.
		res.RestorePath, err = basicLocationPath(repoRef, locRef)
	case ent.OneDrive != nil ||
		(ent.SharePoint != nil && ent.SharePoint.ItemType == details.SharePointLibrary) ||
		(ent.SharePoint != nil && ent.SharePoint.ItemType == details.OneDriveItem):
		res.RestorePath, err = drivePathMerge(ent, repoRef, locRef)
	default:
		return res, clues.New("unknown entry type").WithClues(ctx)
	}

	if err != nil {
		return res, clues.Wrap(err, "generating RestorePath").WithClues(ctx)
	}

	return res, nil
}

// GetPaths takes a set of filtered details entries and returns a set of
// RestorePaths for the entries.
func GetPaths(
	ctx context.Context,
	backupVersion int,
	items []*details.Entry,
	errs *fault.Bus,
) ([]path.RestorePaths, error) {
	var (
		paths = make([]path.RestorePaths, len(items))
		el    = errs.Local()
	)

	for i, ent := range items {
		if el.Failure() != nil {
			break
		}

		restorePaths, err := makeRestorePathsForEntry(ctx, backupVersion, ent)
		if err != nil {
			el.AddRecoverable(clues.Wrap(err, "getting restore paths"))
			continue
		}

		paths[i] = restorePaths
	}

	logger.Ctx(ctx).Infof("found %d details entries to restore", len(paths))

	return paths, el.Failure()
}
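The consumer of this package is formatDetailsForRestoration, updated at the end of this diff. Condensed from that call site as a sketch, with the surrounding function elided:

// One RestorePaths entry is produced per details entry that survives the
// selector reduction; per-entry failures accumulate on errs as recoverable.
fds, err := sel.Reduce(ctx, deets, errs)
if err != nil {
	return nil, err
}

paths, err := pathtransformer.GetPaths(ctx, backupVersion, fds.Items(), errs)
if err != nil {
	return nil, clues.Wrap(err, "getting restore paths")
}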
@ -0,0 +1,340 @@
package pathtransformer_test

import (
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/operations/pathtransformer"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/backup/details/testdata"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

type RestorePathTransformerUnitSuite struct {
	tester.Suite
}

func TestRestorePathTransformerUnitSuite(t *testing.T) {
	suite.Run(t, &RestorePathTransformerUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *RestorePathTransformerUnitSuite) TestGetPaths() {
	type expectPaths struct {
		storage         string
		restore         string
		isRestorePrefix bool
	}

	toRestore := func(
		repoRef path.Path,
		unescapedFolders ...string,
	) string {
		return path.Builder{}.
			Append(
				repoRef.Tenant(),
				repoRef.Service().String(),
				repoRef.ResourceOwner(),
				repoRef.Category().String()).
			Append(unescapedFolders...).
			String()
	}

	var (
		driveID                = "some-drive-id"
		extraItemName          = "some-item"
		SharePointRootItemPath = testdata.SharePointRootPath.MustAppend(extraItemName, true)
	)

	table := []struct {
		name          string
		backupVersion int
		input         []*details.Entry
		expectErr     assert.ErrorAssertionFunc
		expected      []expectPaths
	}{
		{
			name: "SharePoint List Errors",
			// No version bump for the change so we always have to check for this.
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef:     SharePointRootItemPath.RR.String(),
					LocationRef: SharePointRootItemPath.Loc.String(),
					ItemInfo: details.ItemInfo{
						SharePoint: &details.SharePointInfo{
							ItemType: details.SharePointList,
						},
					},
				},
			},
			expectErr: assert.Error,
		},
		{
			name: "SharePoint Page Errors",
			// No version bump for the change so we always have to check for this.
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef:     SharePointRootItemPath.RR.String(),
					LocationRef: SharePointRootItemPath.Loc.String(),
					ItemInfo: details.ItemInfo{
						SharePoint: &details.SharePointInfo{
							ItemType: details.SharePointPage,
						},
					},
				},
			},
			expectErr: assert.Error,
		},
		{
			name: "SharePoint old format, item in root",
			// No version bump for the change so we always have to check for this.
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef:     SharePointRootItemPath.RR.String(),
					LocationRef: SharePointRootItemPath.Loc.String(),
					ItemInfo: details.ItemInfo{
						SharePoint: &details.SharePointInfo{
							ItemType: details.OneDriveItem,
							DriveID:  driveID,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage: SharePointRootItemPath.RR.String(),
					restore: toRestore(
						SharePointRootItemPath.RR,
						append(
							[]string{"drives", driveID},
							SharePointRootItemPath.Loc.Elements()...)...),
				},
			},
		},
		{
			name:          "SharePoint, no LocationRef, no DriveID, item in root",
			backupVersion: version.OneDrive6NameInMeta,
			input: []*details.Entry{
				{
					RepoRef: SharePointRootItemPath.RR.String(),
					ItemInfo: details.ItemInfo{
						SharePoint: &details.SharePointInfo{
							ItemType: details.SharePointLibrary,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage: SharePointRootItemPath.RR.String(),
					restore: toRestore(
						SharePointRootItemPath.RR,
						append(
							[]string{"drives"},
							// testdata path has '.d' on the drives folder we need to remove.
							SharePointRootItemPath.RR.Folders()[1:]...)...),
				},
			},
		},
		{
			name:          "OneDrive, nested item",
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef:     testdata.OneDriveItemPath2.RR.String(),
					LocationRef: testdata.OneDriveItemPath2.Loc.String(),
					ItemInfo: details.ItemInfo{
						OneDrive: &details.OneDriveInfo{
							ItemType: details.OneDriveItem,
							DriveID:  driveID,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage: testdata.OneDriveItemPath2.RR.String(),
					restore: toRestore(
						testdata.OneDriveItemPath2.RR,
						append(
							[]string{"drives", driveID},
							testdata.OneDriveItemPath2.Loc.Elements()...)...),
				},
			},
		},
		{
			name:          "Exchange Email, extra / in path",
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef:     testdata.ExchangeEmailItemPath3.RR.String(),
					LocationRef: testdata.ExchangeEmailItemPath3.Loc.String(),
					ItemInfo: details.ItemInfo{
						Exchange: &details.ExchangeInfo{
							ItemType: details.ExchangeMail,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage: testdata.ExchangeEmailItemPath3.RR.String(),
					restore: toRestore(
						testdata.ExchangeEmailItemPath3.RR,
						testdata.ExchangeEmailItemPath3.Loc.Elements()...),
				},
			},
		},
		{
			name:          "Exchange Email, no LocationRef, extra / in path",
			backupVersion: version.OneDrive7LocationRef,
			input: []*details.Entry{
				{
					RepoRef:     testdata.ExchangeEmailItemPath3.RR.String(),
					LocationRef: testdata.ExchangeEmailItemPath3.Loc.String(),
					ItemInfo: details.ItemInfo{
						Exchange: &details.ExchangeInfo{
							ItemType: details.ExchangeMail,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage: testdata.ExchangeEmailItemPath3.RR.String(),
					restore: toRestore(
						testdata.ExchangeEmailItemPath3.RR,
						testdata.ExchangeEmailItemPath3.Loc.Elements()...),
				},
			},
		},
		{
			name:          "Exchange Contact",
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef:     testdata.ExchangeContactsItemPath1.RR.String(),
					LocationRef: testdata.ExchangeContactsItemPath1.Loc.String(),
					ItemInfo: details.ItemInfo{
						Exchange: &details.ExchangeInfo{
							ItemType: details.ExchangeContact,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage: testdata.ExchangeContactsItemPath1.RR.String(),
					restore: toRestore(
						testdata.ExchangeContactsItemPath1.RR,
						testdata.ExchangeContactsItemPath1.Loc.Elements()...),
				},
			},
		},
		{
			name:          "Exchange Contact, root dir",
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef: testdata.ExchangeContactsItemPath1.RR.String(),
					ItemInfo: details.ItemInfo{
						Exchange: &details.ExchangeInfo{
							ItemType: details.ExchangeContact,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage:         testdata.ExchangeContactsItemPath1.RR.String(),
					restore:         toRestore(testdata.ExchangeContactsItemPath1.RR, "tmp"),
					isRestorePrefix: true,
				},
			},
		},
		{
			name:          "Exchange Event",
			backupVersion: version.All8MigrateUserPNToID,
			input: []*details.Entry{
				{
					RepoRef:     testdata.ExchangeEmailItemPath3.RR.String(),
					LocationRef: testdata.ExchangeEmailItemPath3.Loc.String(),
					ItemInfo: details.ItemInfo{
						Exchange: &details.ExchangeInfo{
							ItemType: details.ExchangeMail,
						},
					},
				},
			},
			expectErr: assert.NoError,
			expected: []expectPaths{
				{
					storage: testdata.ExchangeEmailItemPath3.RR.String(),
					restore: toRestore(
						testdata.ExchangeEmailItemPath3.RR,
						testdata.ExchangeEmailItemPath3.Loc.Elements()...),
				},
			},
		},
	}

	for _, test := range table {
		suite.Run(test.name, func() {
			ctx, flush := tester.NewContext()
			defer flush()

			t := suite.T()

			paths, err := pathtransformer.GetPaths(
				ctx,
				test.backupVersion,
				test.input,
				fault.New(true))
			test.expectErr(t, err, clues.ToCore(err))

			if err != nil {
				return
			}

			expected := make([]path.RestorePaths, 0, len(test.expected))

			for _, e := range test.expected {
				tmp := path.RestorePaths{}
				p, err := path.FromDataLayerPath(e.storage, true)
				require.NoError(t, err, "parsing expected storage path", clues.ToCore(err))

				tmp.StoragePath = p

				p, err = path.FromDataLayerPath(e.restore, false)
				require.NoError(t, err, "parsing expected restore path", clues.ToCore(err))

				if e.isRestorePrefix {
					p, err = p.Dir()
					require.NoError(t, err, "getting service prefix", clues.ToCore(err))
				}

				tmp.RestorePath = p

				expected = append(expected, tmp)
			}

			assert.ElementsMatch(t, expected, paths)
		})
	}
}
@@ -18,6 +18,7 @@ import (
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/internal/observe"
 	"github.com/alcionai/corso/src/internal/operations/inject"
+	"github.com/alcionai/corso/src/internal/operations/pathtransformer"
 	"github.com/alcionai/corso/src/internal/stats"
 	"github.com/alcionai/corso/src/internal/streamstore"
 	"github.com/alcionai/corso/src/pkg/account"
@@ -349,36 +350,15 @@ func formatDetailsForRestoration(
 	sel selectors.Selector,
 	deets *details.Details,
 	errs *fault.Bus,
-) ([]path.Path, error) {
+) ([]path.RestorePaths, error) {
 	fds, err := sel.Reduce(ctx, deets, errs)
 	if err != nil {
 		return nil, err
 	}
 
-	var (
-		fdsPaths  = fds.Paths()
-		paths     = make([]path.Path, len(fdsPaths))
-		shortRefs = make([]string, len(fdsPaths))
-		el        = errs.Local()
-	)
-
-	for i := range fdsPaths {
-		if el.Failure() != nil {
-			break
-		}
-
-		p, err := path.FromDataLayerPath(fdsPaths[i], true)
-		if err != nil {
-			el.AddRecoverable(clues.
-				Wrap(err, "parsing details path after reduction").
-				WithMap(clues.In(ctx)).
-				With("path", fdsPaths[i]))
-
-			continue
-		}
-
-		paths[i] = p
-		shortRefs[i] = p.ShortRef()
+	paths, err := pathtransformer.GetPaths(ctx, backupVersion, fds.Items(), errs)
+	if err != nil {
+		return nil, clues.Wrap(err, "getting restore paths")
 	}
 
 	if sel.Service == selectors.ServiceOneDrive {
@@ -388,7 +368,5 @@ func formatDetailsForRestoration(
 		}
 	}
 
-	logger.Ctx(ctx).With("short_refs", shortRefs).Infof("found %d details entries to restore", len(shortRefs))
-
-	return paths, el.Failure()
+	return paths, nil
 }
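
The hunks above change the restore pipeline to return path.RestorePaths values, which pair an item's location inside the backup with the folder it should be restored into. A minimal sketch of consuming the result, assuming only the StoragePath and RestorePath fields visible in this diff:

	for _, rp := range paths {
		// rp.StoragePath locates the item inside the backup snapshot;
		// rp.RestorePath is the folder the item is restored into.
		fmt.Println(rp.StoragePath.String(), "->", rp.RestorePath.String())
	}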
@@ -55,7 +55,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
 		gc   = &mock.GraphConnector{}
 		acct = account.Account{}
 		now  = time.Now()
-		dest = tester.DefaultTestRestoreDestination()
+		dest = tester.DefaultTestRestoreDestination("")
 	)
 
 	table := []struct {
@@ -220,7 +220,7 @@ func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() {
 		sw   = &store.Wrapper{}
 		gc   = &mock.GraphConnector{}
 		acct = tester.NewM365Account(suite.T())
-		dest = tester.DefaultTestRestoreDestination()
+		dest = tester.DefaultTestRestoreDestination("")
 		opts = control.Defaults()
 	)
 
@@ -392,7 +392,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
 		{
 			name:  "Exchange_Restore",
 			owner: tester.M365UserID(suite.T()),
-			dest:  tester.DefaultTestRestoreDestination(),
+			dest:  tester.DefaultTestRestoreDestination(""),
 			getSelector: func(t *testing.T, owners []string) selectors.Selector {
 				rsel := selectors.NewExchangeRestore(owners)
 				rsel.Include(rsel.AllData())
@@ -464,7 +464,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoResults() {
 
 	var (
 		t    = suite.T()
-		dest = tester.DefaultTestRestoreDestination()
+		dest = tester.DefaultTestRestoreDestination("")
 		mb   = evmock.NewBus()
 	)
 
@@ -262,12 +262,22 @@ func read(
 		return clues.Stack(err).WithClues(ctx)
 	}
 
+	pd, err := p.Dir()
+	if err != nil {
+		return clues.Stack(err).WithClues(ctx)
+	}
+
 	ctx = clues.Add(ctx, "snapshot_id", snapshotID)
 
 	cs, err := rer.ProduceRestoreCollections(
 		ctx,
 		snapshotID,
-		[]path.Path{p},
+		[]path.RestorePaths{
+			{
+				StoragePath: p,
+				RestorePath: pd,
+			},
+		},
 		&stats.ByteCounter{},
 		errs)
 	if err != nil {
@@ -106,7 +106,7 @@ func readTestConfig() (map[string]string, error) {
 	testEnv := map[string]string{}
 	fallbackTo(testEnv, TestCfgStorageProvider, vpr.GetString(TestCfgStorageProvider))
 	fallbackTo(testEnv, TestCfgAccountProvider, vpr.GetString(TestCfgAccountProvider))
-	fallbackTo(testEnv, TestCfgBucket, vpr.GetString(TestCfgBucket), "test-corso-repo-init")
+	fallbackTo(testEnv, TestCfgBucket, os.Getenv("S3_BUCKET"), vpr.GetString(TestCfgBucket), "test-corso-repo-init")
 	fallbackTo(testEnv, TestCfgEndpoint, vpr.GetString(TestCfgEndpoint), "s3.amazonaws.com")
 	fallbackTo(testEnv, TestCfgPrefix, vpr.GetString(TestCfgPrefix))
 	fallbackTo(testEnv, TestCfgAzureTenantID, os.Getenv(account.AzureTenantID), vpr.GetString(TestCfgAzureTenantID))
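
The fallbackTo helper itself is not shown in this diff; judging from the call sites above, it appears to assign the first non-empty candidate to the given key. A hypothetical sketch of that behavior (the signature is inferred, not the repo's actual code):

	// Hypothetical sketch of fallbackTo, inferred from its call sites above.
	// Assumption: it writes the first non-empty candidate into the map.
	func fallbackTo(m map[string]string, key string, candidates ...string) {
		for _, c := range candidates {
			if len(c) > 0 {
				m[key] = c
				return
			}
		}
	}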
@@ -1,11 +1,26 @@
 package tester
 
 import (
+	"strings"
+
 	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/pkg/control"
 )
 
-func DefaultTestRestoreDestination() control.RestoreDestination {
-	// Use microsecond granularity to help reduce collisions.
-	return control.DefaultRestoreDestination(dttm.SafeForTesting)
+const RestoreFolderPrefix = "Corso_Test"
+
+func DefaultTestRestoreDestination(namespace string) control.RestoreDestination {
+	var (
+		dest = control.DefaultRestoreDestination(dttm.SafeForTesting)
+		sft  = dttm.FormatNow(dttm.SafeForTesting)
+	)
+
+	parts := []string{RestoreFolderPrefix, namespace, sft}
+	if len(namespace) == 0 {
+		parts = []string{RestoreFolderPrefix, sft}
+	}
+
+	dest.ContainerName = strings.Join(parts, "_")
+
+	return dest
 }
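
For illustration, the new namespace parameter splices an extra token between the prefix and the timestamp, while an empty namespace reproduces the old naming (the container names below are examples, since the timestamp varies per run):

	dest := tester.DefaultTestRestoreDestination("")
	// dest.ContainerName resembles "Corso_Test_<timestamp>"

	dest = tester.DefaultTestRestoreDestination("exchange")
	// dest.ContainerName resembles "Corso_Test_exchange_<timestamp>"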
@@ -14,6 +14,7 @@ import (
 	"github.com/stretchr/testify/suite"
 
 	"github.com/alcionai/corso/src/internal/common/dttm"
+	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/version"
@@ -242,9 +243,9 @@ func oneDriveishEntry(t *testing.T, id string, size int, it ItemType) Entry {
 		"tenant-id",
 		"user-id",
 		[]string{
-			"drives",
+			odConsts.DrivesPathDir,
 			"drive-id",
-			"root:",
+			odConsts.RootPathDir,
 			"Inbox",
 			"folder1",
 			id,
@@ -408,7 +409,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() {
 		{
 			ItemInfo: ItemInfo{
 				Folder: &FolderInfo{
-					DisplayName: "root:",
+					DisplayName: odConsts.RootPathDir,
 					ItemType:    FolderItem,
 					DriveName:   "drive-name",
 					DriveID:     "drive-id",
@@ -416,7 +417,7 @@ func (suite *DetailsUnitSuite) TestDetailsAdd_LocationFolders() {
 			},
 		},
 		{
-			LocationRef: "root:",
+			LocationRef: odConsts.RootPathDir,
 			ItemInfo: ItemInfo{
 				Folder: &FolderInfo{
 					DisplayName: "Inbox",
@@ -958,7 +959,7 @@ func (suite *DetailsUnitSuite) TestBuilder_Add_shortRefsUniqueFromFolder() {
 			"a-user",
 			[]string{
 				"drive-id",
-				"root:",
+				odConsts.RootPathDir,
 				"folder",
 				name + "-id",
 			})
@@ -971,7 +972,7 @@ func (suite *DetailsUnitSuite) TestBuilder_Add_shortRefsUniqueFromFolder() {
 			"a-user",
 			[]string{
 				"drive-id",
-				"root:",
+				odConsts.RootPathDir,
 				"folder",
 				name + "-id",
 				name,
@@ -1060,7 +1061,7 @@ func (suite *DetailsUnitSuite) TestUpdateItem() {
 	)
 
 	newExchangePB := path.Builder{}.Append(folder2)
-	newOneDrivePB := path.Builder{}.Append("root:", folder2)
+	newOneDrivePB := path.Builder{}.Append(odConsts.RootPathDir, folder2)
 
 	table := []struct {
 		name string
src/pkg/backup/details/testdata/in_deets.go  (vendored, new file, 368 lines)
@@ -0,0 +1,368 @@
package testdata

import (
	"context"
	"strings"
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/maps"

	"github.com/alcionai/corso/src/internal/kopia"
	"github.com/alcionai/corso/src/internal/model"
	"github.com/alcionai/corso/src/internal/streamstore"
	"github.com/alcionai/corso/src/pkg/backup"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

// ---------------------------------------------------------------------------
// location set handling
// ---------------------------------------------------------------------------

var exists = struct{}{}

type locSet struct {
	// map[locationRef]map[itemRef]{}
	// refs may be either the canonical ent refs, or something else,
	// so long as they are consistent for the test in question
	Locations map[string]map[string]struct{}
	Deleted   map[string]map[string]struct{}
}

func newLocSet() *locSet {
	return &locSet{
		Locations: map[string]map[string]struct{}{},
		Deleted:   map[string]map[string]struct{}{},
	}
}

func (ls *locSet) AddItem(locationRef, itemRef string) {
	ls.AddLocation(locationRef)

	ls.Locations[locationRef][itemRef] = exists
	delete(ls.Deleted[locationRef], itemRef)
}

func (ls *locSet) RemoveItem(locationRef, itemRef string) {
	delete(ls.Locations[locationRef], itemRef)

	if _, ok := ls.Deleted[locationRef]; !ok {
		ls.Deleted[locationRef] = map[string]struct{}{}
	}

	ls.Deleted[locationRef][itemRef] = exists
}

func (ls *locSet) MoveItem(fromLocation, toLocation, ir string) {
	ls.RemoveItem(fromLocation, ir)
	ls.AddItem(toLocation, ir)
}

func (ls *locSet) AddLocation(locationRef string) {
	if _, ok := ls.Locations[locationRef]; !ok {
		ls.Locations[locationRef] = map[string]struct{}{}
	}
	// don't purge previously deleted items, or child locations.
	// Assumption is that their itemRef is unique, and still deleted.
	delete(ls.Deleted, locationRef)
}

func (ls *locSet) RemoveLocation(locationRef string) {
	ss := ls.Subset(locationRef)

	for lr := range ss.Locations {
		items := ls.Locations[lr]

		delete(ls.Locations, lr)

		if _, ok := ls.Deleted[lr]; !ok {
			ls.Deleted[lr] = map[string]struct{}{}
		}

		for ir := range items {
			ls.Deleted[lr][ir] = exists
		}
	}
}

// MoveLocation takes the LAST element in the fromLocation (and all
// children matching the prefix) and relocates it as a child of toLocation.
// ex: MoveLocation("/a/b/c", "/d") will move all entries with the prefix
// "/a/b/c" into "/d/c". This also deletes all "/a/b/c" entries and children.
// Assumes item IDs don't change across the migration. If item IDs do change,
// that difference will need to be handled manually by the caller.
// Returns the base folder's new location (ex: /d/c).
func (ls *locSet) MoveLocation(fromLocation, toLocation string) string {
	fromBuilder := path.Builder{}.Append(path.Split(fromLocation)...)
	toBuilder := path.Builder{}.Append(path.Split(toLocation)...).Append(fromBuilder.LastElem())

	ls.RenameLocation(fromBuilder.String(), toBuilder.String())

	return toBuilder.String()
}
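
// Example of the relocation semantics (an illustrative sketch, not part of
// the original file):
//
//	ls := newLocSet()
//	ls.AddItem("a/b/c", "item1")
//	newLoc := ls.MoveLocation("a/b/c", "d")
//	// newLoc == "d/c"; "a/b/c" and its items are now tracked under Deleted,
//	// and "d/c" holds item1.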

func (ls *locSet) RenameLocation(fromLocation, toLocation string) {
	ss := ls.Subset(fromLocation)
	fromBuilder := path.Builder{}.Append(path.Split(fromLocation)...)
	toBuilder := path.Builder{}.Append(path.Split(toLocation)...)

	for lr, items := range ss.Locations {
		lrBuilder := path.Builder{}.Append(path.Split(lr)...)
		lrBuilder.UpdateParent(fromBuilder, toBuilder)

		newLoc := lrBuilder.String()

		for ir := range items {
			ls.RemoveItem(lr, ir)
			ls.AddItem(newLoc, ir)
		}

		ls.RemoveLocation(lr)
		ls.AddLocation(newLoc)
	}
}

// Subset produces a new locSet containing only Items and Locations
// whose location matches the locationPfx
func (ls *locSet) Subset(locationPfx string) *locSet {
	ss := newLocSet()

	for lr, items := range ls.Locations {
		if strings.HasPrefix(lr, locationPfx) {
			ss.AddLocation(lr)

			for ir := range items {
				ss.AddItem(lr, ir)
			}
		}
	}

	return ss
}

// ---------------------------------------------------------------------------
// The goal of InDeets is to provide a struct and interface which allows
// tests to predict not just the elements within a set of details entries,
// but also their changes (relocation, renaming, etc) in a way that consolidates
// building an "expected set" of details entries that can be compared against
// the details results after a backup.
// ---------------------------------------------------------------------------

// InDeets is a helper for comparing details state in tests
// across backup instances.
type InDeets struct {
	// only: tenantID/service/resourceOwnerID
	RRPrefix string
	// map of container setting the uniqueness boundary for location
	// ref entries (eg, data type like email, contacts, etc, or
	// drive id) to the unique entries in that set.
	Sets map[string]*locSet
}

func NewInDeets(repoRefPrefix string) *InDeets {
	return &InDeets{
		RRPrefix: repoRefPrefix,
		Sets:     map[string]*locSet{},
	}
}

func (id *InDeets) getSet(set string) *locSet {
	s, ok := id.Sets[set]
	if ok {
		return s
	}

	// register the new set so that later mutations aren't lost;
	// returning an unregistered set would silently drop updates.
	s = newLocSet()
	id.Sets[set] = s

	return s
}

func (id *InDeets) AddAll(deets details.Details, ws whatSet) {
	if id.Sets == nil {
		id.Sets = map[string]*locSet{}
	}

	for _, ent := range deets.Entries {
		set, err := ws(ent)
		if err != nil {
			set = err.Error()
		}

		dir := ent.LocationRef

		if ent.Folder != nil {
			dir = dir + ent.Folder.DisplayName
			id.AddLocation(set, dir)
		} else {
			id.AddItem(set, ent.LocationRef, ent.ItemRef)
		}
	}
}

func (id *InDeets) AddItem(set, locationRef, itemRef string) {
	id.getSet(set).AddItem(locationRef, itemRef)
}

func (id *InDeets) RemoveItem(set, locationRef, itemRef string) {
	id.getSet(set).RemoveItem(locationRef, itemRef)
}

func (id *InDeets) MoveItem(set, fromLocation, toLocation, ir string) {
	id.getSet(set).MoveItem(fromLocation, toLocation, ir)
}

func (id *InDeets) AddLocation(set, locationRef string) {
	id.getSet(set).AddLocation(locationRef)
}

// RemoveLocation removes the provided location, and all children
// of that location.
func (id *InDeets) RemoveLocation(set, locationRef string) {
	id.getSet(set).RemoveLocation(locationRef)
}

// MoveLocation takes the LAST element in the fromLocation (and all
// children matching the prefix) and relocates it as a child of toLocation.
// ex: MoveLocation("/a/b/c", "/d") will move all entries with the prefix
// "/a/b/c" into "/d/c". This also deletes all "/a/b/c" entries and children.
// Assumes item IDs don't change across the migration. If item IDs do change,
// that difference will need to be handled manually by the caller.
// Returns the base folder's new location (ex: /d/c).
func (id *InDeets) MoveLocation(set, fromLocation, toLocation string) string {
	return id.getSet(set).MoveLocation(fromLocation, toLocation)
}

func (id *InDeets) RenameLocation(set, fromLocation, toLocation string) {
	id.getSet(set).RenameLocation(fromLocation, toLocation)
}

// Subset produces a new locSet containing only Items and Locations
// whose location matches the locationPfx
func (id *InDeets) Subset(set, locationPfx string) *locSet {
	return id.getSet(set).Subset(locationPfx)
}

// ---------------------------------------------------------------------------
// whatSet helpers for extracting a set identifier from an arbitrary repoRef
// ---------------------------------------------------------------------------

type whatSet func(details.Entry) (string, error)

// common whatSet parser that extracts the service category from
// a repoRef.
func CategoryFromRepoRef(ent details.Entry) (string, error) {
	p, err := path.FromDataLayerPath(ent.RepoRef, false)
	if err != nil {
		return "", err
	}

	return p.Category().String(), nil
}

// common whatSet parser that extracts the driveID from a repoRef.
func DriveIDFromRepoRef(ent details.Entry) (string, error) {
	p, err := path.FromDataLayerPath(ent.RepoRef, false)
	if err != nil {
		return "", err
	}

	odp, err := path.ToDrivePath(p)
	if err != nil {
		return "", err
	}

	return odp.DriveID, nil
}
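
// Any function matching the whatSet signature can bucket entries however a
// test needs. An illustrative sketch (not part of the original file) that
// groups every entry into a single set:
//
//	allInOne := func(details.Entry) (string, error) {
//		return "all", nil
//	}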

// ---------------------------------------------------------------------------
// helpers and comparators
// ---------------------------------------------------------------------------

func CheckBackupDetails(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	backupID model.StableID,
	ws whatSet,
	ms *kopia.ModelStore,
	ssr streamstore.Reader,
	expect *InDeets,
	// standard check is assert.Subset due to issues of external data cross-
	// pollination. This should be true if the backup contains a unique directory
	// of data.
	mustEqualFolders bool,
) {
	deets, result := GetDeetsInBackup(t, ctx, backupID, "", "", path.UnknownService, ws, ms, ssr)

	t.Log("details entries in result")

	for _, ent := range deets.Entries {
		if ent.Folder == nil {
			t.Log(ent.LocationRef)
			t.Log(ent.ItemRef)
		}

		assert.Truef(
			t,
			strings.HasPrefix(ent.RepoRef, expect.RRPrefix),
			"all details should begin with the expected prefix\nwant: %s\ngot: %s",
			expect.RRPrefix, ent.RepoRef)
	}

	for set := range expect.Sets {
		check := assert.Subsetf

		if mustEqualFolders {
			check = assert.ElementsMatchf
		}

		check(
			t,
			maps.Keys(result.Sets[set].Locations),
			maps.Keys(expect.Sets[set].Locations),
			"results in %s missing expected location", set)

		for lr, items := range expect.Sets[set].Deleted {
			_, ok := result.Sets[set].Locations[lr]
			assert.Falsef(t, ok, "deleted location in %s found in result: %s", set, lr)

			for ir := range items {
				_, ok := result.Sets[set].Locations[lr][ir]
				assert.Falsef(t, ok, "deleted item in %s found in result: %s", set, ir)
			}
		}
	}
}

func GetDeetsInBackup(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	backupID model.StableID,
	tid, resourceOwner string,
	service path.ServiceType,
	ws whatSet,
	ms *kopia.ModelStore,
	ssr streamstore.Reader,
) (details.Details, *InDeets) {
	bup := backup.Backup{}

	err := ms.Get(ctx, model.BackupSchema, backupID, &bup)
	require.NoError(t, err, clues.ToCore(err))

	ssid := bup.StreamStoreID
	require.NotEmpty(t, ssid, "stream store ID")

	var deets details.Details
	err = ssr.Read(
		ctx,
		ssid,
		streamstore.DetailsReader(details.UnmarshalTo(&deets)),
		fault.New(true))
	require.NoError(t, err, clues.ToCore(err))

	id := NewInDeets(path.Builder{}.Append(tid, service.String(), resourceOwner).String())
	id.AddAll(deets, ws)

	return deets, id
}
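
Together, the helpers above support a backup-to-backup comparison flow: capture an expected InDeets from a first backup, mutate it to mirror the changes a test makes, then verify the next backup against it. A rough sketch, assuming fixtures (ms, ssr, tenant, user, and backup IDs) that this diff does not define:

	// Sketch only; ms, ssr, and the IDs are assumed test fixtures.
	_, expect := testdata.GetDeetsInBackup(
		t, ctx, firstBackupID, tenantID, userID, path.ExchangeService,
		testdata.CategoryFromRepoRef, ms, ssr)

	// Mirror the folder move the test performs against live data.
	expect.MoveLocation("email", "Inbox/old", "Archive")

	testdata.CheckBackupDetails(
		t, ctx, secondBackupID,
		testdata.CategoryFromRepoRef, ms, ssr,
		expect, false)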
src/pkg/backup/details/testdata/in_deets_test.go  (vendored, new file, 445 lines)
@@ -0,0 +1,445 @@
package testdata

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"
	"golang.org/x/exp/maps"

	"github.com/alcionai/corso/src/internal/tester"
)

type LocSetUnitSuite struct {
	tester.Suite
}

func TestLocSetUnitSuite(t *testing.T) {
	suite.Run(t, &LocSetUnitSuite{Suite: tester.NewUnitSuite(t)})
}

const (
	l1  = "lr_1"
	l2  = "lr_2"
	l13 = "lr_1/lr_3"
	l14 = "lr_1/lr_4"
	i1  = "ir_1"
	i2  = "ir_2"
	i3  = "ir_3"
	i4  = "ir_4"
)

func (suite *LocSetUnitSuite) TestAdd() {
	t := suite.T()

	ls := newLocSet()

	ls.AddItem(l1, i1)
	ls.AddLocation(l2)

	assert.ElementsMatch(t, []string{l1, l2}, maps.Keys(ls.Locations))
	assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1]))
	assert.Empty(t, maps.Keys(ls.Locations[l2]))
	assert.Empty(t, maps.Keys(ls.Locations[l13]))
}

func (suite *LocSetUnitSuite) TestRemove() {
	t := suite.T()

	ls := newLocSet()

	ls.AddItem(l1, i1)
	ls.AddItem(l1, i2)
	ls.AddLocation(l13)
	ls.AddItem(l14, i3)
	ls.AddItem(l14, i4)

	assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))
	assert.Empty(t, maps.Keys(ls.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[l14]))

	// nop removal
	ls.RemoveItem(l2, i1)
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))

	// item removal
	ls.RemoveItem(l1, i2)
	assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1]))

	// nop location removal
	ls.RemoveLocation(l2)
	assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations))

	// non-cascading location removal
	ls.RemoveLocation(l13)
	assert.ElementsMatch(t, []string{l1, l14}, maps.Keys(ls.Locations))
	assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[l14]))

	// cascading location removal
	ls.RemoveLocation(l1)
	assert.Empty(t, maps.Keys(ls.Locations))
	assert.Empty(t, maps.Keys(ls.Locations[l1]))
	assert.Empty(t, maps.Keys(ls.Locations[l13]))
	assert.Empty(t, maps.Keys(ls.Locations[l14]))
}

func (suite *LocSetUnitSuite) TestSubset() {
	ls := newLocSet()

	ls.AddItem(l1, i1)
	ls.AddItem(l1, i2)
	ls.AddLocation(l13)
	ls.AddItem(l14, i3)
	ls.AddItem(l14, i4)

	table := []struct {
		name   string
		locPfx string
		expect func(*testing.T, *locSet)
	}{
		{
			name:   "nop",
			locPfx: l2,
			expect: func(t *testing.T, ss *locSet) {
				assert.Empty(t, maps.Keys(ss.Locations))
			},
		},
		{
			name:   "no items",
			locPfx: l13,
			expect: func(t *testing.T, ss *locSet) {
				assert.ElementsMatch(t, []string{l13}, maps.Keys(ss.Locations))
				assert.Empty(t, maps.Keys(ss.Locations[l13]))
			},
		},
		{
			name:   "non-cascading",
			locPfx: l14,
			expect: func(t *testing.T, ss *locSet) {
				assert.ElementsMatch(t, []string{l14}, maps.Keys(ss.Locations))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ss.Locations[l14]))
			},
		},
		{
			name:   "cascading",
			locPfx: l1,
			expect: func(t *testing.T, ss *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ss.Locations))
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ss.Locations[l1]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ss.Locations[l14]))
				assert.Empty(t, maps.Keys(ss.Locations[l13]))
			},
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			test.expect(t, ls.Subset(test.locPfx))
		})
	}
}

func (suite *LocSetUnitSuite) TestRename() {
	t := suite.T()

	makeSet := func() *locSet {
		ls := newLocSet()

		ls.AddItem(l1, i1)
		ls.AddItem(l1, i2)
		ls.AddLocation(l13)
		ls.AddItem(l14, i3)
		ls.AddItem(l14, i4)

		return ls
	}

	ts := makeSet()
	assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ts.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1]))
	assert.Empty(t, maps.Keys(ts.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14]))

	table := []struct {
		name   string
		from   string
		to     string
		expect func(*testing.T, *locSet)
	}{
		{
			name: "nop",
			from: l2,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l2]))
				assert.Empty(t, maps.Keys(ls.Locations["foo"]))
			},
		},
		{
			name: "no items",
			from: l13,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, "foo", l14}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.Empty(t, maps.Keys(ls.Locations["foo"]))
			},
		},
		{
			name: "with items",
			from: l14,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, "foo"}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations["foo"]))
			},
		},
		{
			name: "cascading locations",
			from: l1,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{"foo", "foo/lr_3", "foo/lr_4"}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l1]))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations["foo"]))
				assert.Empty(t, maps.Keys(ls.Locations["foo/lr_3"]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations["foo/lr_4"]))
			},
		},
		{
			name: "to existing location",
			from: l14,
			to:   l1,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.ElementsMatch(t, []string{i1, i2, i3, i4}, maps.Keys(ls.Locations[l1]))
			},
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			ls := makeSet()

			ls.RenameLocation(test.from, test.to)
			test.expect(t, ls)
		})
	}
}

func (suite *LocSetUnitSuite) TestItem() {
	t := suite.T()
	b4 := "bar/lr_4"

	makeSet := func() *locSet {
		ls := newLocSet()

		ls.AddItem(l1, i1)
		ls.AddItem(l1, i2)
		ls.AddLocation(l13)
		ls.AddItem(l14, i3)
		ls.AddItem(l14, i4)
		ls.AddItem(b4, "fnord")

		return ls
	}

	ts := makeSet()
	assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ts.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1]))
	assert.Empty(t, maps.Keys(ts.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14]))
	assert.ElementsMatch(t, []string{"fnord"}, maps.Keys(ts.Locations[b4]))

	table := []struct {
		name   string
		item   string
		from   string
		to     string
		expect func(*testing.T, *locSet)
	}{
		{
			name: "nop item",
			item: "floob",
			from: l2,
			to:   l1,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i1, i2, "floob"}, maps.Keys(ls.Locations[l1]))
				assert.Empty(t, maps.Keys(ls.Locations[l2]))
			},
		},
		{
			name: "nop origin",
			item: i1,
			from: "smarf",
			to:   l2,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))
				assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l2]))
				assert.Empty(t, maps.Keys(ls.Locations["smarf"]))
			},
		},
		{
			name: "new location",
			item: i1,
			from: l1,
			to:   "fnords",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i2}, maps.Keys(ls.Locations[l1]))
				assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations["fnords"]))
			},
		},
		{
			name: "existing location",
			item: i1,
			from: l1,
			to:   l2,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i2}, maps.Keys(ls.Locations[l1]))
				assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l2]))
			},
		},
		{
			name: "same location",
			item: i1,
			from: l1,
			to:   l1,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))
			},
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			ls := makeSet()

			ls.MoveItem(test.from, test.to, test.item)
			test.expect(t, ls)
		})
	}
}

func (suite *LocSetUnitSuite) TestMoveLocation() {
	t := suite.T()
	b4 := "bar/lr_4"

	makeSet := func() *locSet {
		ls := newLocSet()

		ls.AddItem(l1, i1)
		ls.AddItem(l1, i2)
		ls.AddLocation(l13)
		ls.AddItem(l14, i3)
		ls.AddItem(l14, i4)
		ls.AddItem(b4, "fnord")

		return ls
	}

	ts := makeSet()
	assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ts.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1]))
	assert.Empty(t, maps.Keys(ts.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14]))
	assert.ElementsMatch(t, []string{"fnord"}, maps.Keys(ts.Locations[b4]))

	table := []struct {
		name         string
		from         string
		to           string
		expect       func(*testing.T, *locSet)
		expectNewLoc string
	}{
		{
			name: "nop root",
			from: l2,
			to:   "",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l2]))
			},
			expectNewLoc: l2,
		},
		{
			name: "nop child",
			from: l2,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations["foo"]))
				assert.Empty(t, maps.Keys(ls.Locations["foo/"+l2]))
			},
			expectNewLoc: "foo/" + l2,
		},
		{
			name: "no items",
			from: l13,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				newLoc := "foo/lr_3"
				assert.ElementsMatch(t, []string{l1, newLoc, l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.Empty(t, maps.Keys(ls.Locations[newLoc]))
			},
			expectNewLoc: "foo/lr_3",
		},
		{
			name: "with items",
			from: l14,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				newLoc := "foo/lr_4"
				assert.ElementsMatch(t, []string{l1, l13, newLoc, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[newLoc]))
			},
			expectNewLoc: "foo/lr_4",
		},
		{
			name: "cascading locations",
			from: l1,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				pfx := "foo/"
				assert.ElementsMatch(t, []string{pfx + l1, pfx + l13, pfx + l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l1]))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[pfx+l1]))
				assert.Empty(t, maps.Keys(ls.Locations[pfx+l13]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[pfx+l14]))
			},
			expectNewLoc: "foo/" + l1,
		},
		{
			name: "to existing location",
			from: l14,
			to:   "bar",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.Empty(t, maps.Keys(ls.Locations["bar"]))
				assert.ElementsMatch(t, []string{"fnord", i3, i4}, maps.Keys(ls.Locations[b4]))
			},
			expectNewLoc: b4,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			ls := makeSet()

			newLoc := ls.MoveLocation(test.from, test.to)
			test.expect(t, ls)
			assert.Equal(t, test.expectNewLoc, newLoc)
		})
	}
}
src/pkg/backup/details/testdata/testdata.go  (vendored, 103 lines changed)
@@ -25,7 +25,7 @@ func mustParsePath(ref string, isItem bool) path.Path {
 // path with the element appended to it. Panics if the path cannot be parsed.
 // Useful for simple variable assignments.
 func mustAppendPath(p path.Path, newElement string, isItem bool) path.Path {
-	newP, err := p.Append(newElement, isItem)
+	newP, err := p.Append(isItem, newElement)
 	if err != nil {
 		panic(err)
 	}
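
The flipped argument order above suggests that path.Path.Append now takes the isItem flag first, presumably followed by a variadic list of elements. A guess at the shape, inferred from the call sites in this diff rather than confirmed by the path package:

	// Assumed signature: Append(isItem bool, elems ...string) (path.Path, error)
	newP, err := p.Append(false, "folder", "subfolder")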
@@ -54,10 +54,10 @@ func locFromRepo(rr path.Path, isItem bool) *path.Builder {
 
 type repoRefAndLocRef struct {
 	RR  path.Path
-	loc *path.Builder
+	Loc *path.Builder
 }
 
-func (p repoRefAndLocRef) mustAppend(newElement string, isItem bool) repoRefAndLocRef {
+func (p repoRefAndLocRef) MustAppend(newElement string, isItem bool) repoRefAndLocRef {
 	e := newElement + folderSuffix
 
 	if isItem {
@@ -68,7 +68,7 @@ func (p repoRefAndLocRef) mustAppend(newElement string, isItem bool) repoRefAndL
 		RR: mustAppendPath(p.RR, e, isItem),
 	}
 
-	res.loc = locFromRepo(res.RR, isItem)
+	res.Loc = locFromRepo(res.RR, isItem)
 
 	return res
 }
@@ -85,7 +85,7 @@ func (p repoRefAndLocRef) FolderLocation() string {
 		lastElem = f[len(f)-2]
 	}
 
-	return p.loc.Append(strings.TrimSuffix(lastElem, folderSuffix)).String()
+	return p.Loc.Append(strings.TrimSuffix(lastElem, folderSuffix)).String()
 }
 
 func mustPathRep(ref string, isItem bool) repoRefAndLocRef {
@@ -115,7 +115,7 @@ func mustPathRep(ref string, isItem bool) repoRefAndLocRef {
 	}
 
 	res.RR = rr
-	res.loc = locFromRepo(rr, isItem)
+	res.Loc = locFromRepo(rr, isItem)
 
 	return res
 }
@@ -138,12 +138,12 @@ var (
 	Time4 = time.Date(2023, 10, 21, 10, 0, 0, 0, time.UTC)
 
 	ExchangeEmailInboxPath = mustPathRep("tenant-id/exchange/user-id/email/Inbox", false)
-	ExchangeEmailBasePath  = ExchangeEmailInboxPath.mustAppend("subfolder", false)
-	ExchangeEmailBasePath2 = ExchangeEmailInboxPath.mustAppend("othersubfolder/", false)
-	ExchangeEmailBasePath3 = ExchangeEmailBasePath2.mustAppend("subsubfolder", false)
-	ExchangeEmailItemPath1 = ExchangeEmailBasePath.mustAppend(ItemName1, true)
-	ExchangeEmailItemPath2 = ExchangeEmailBasePath2.mustAppend(ItemName2, true)
-	ExchangeEmailItemPath3 = ExchangeEmailBasePath3.mustAppend(ItemName3, true)
+	ExchangeEmailBasePath  = ExchangeEmailInboxPath.MustAppend("subfolder", false)
+	ExchangeEmailBasePath2 = ExchangeEmailInboxPath.MustAppend("othersubfolder/", false)
+	ExchangeEmailBasePath3 = ExchangeEmailBasePath2.MustAppend("subsubfolder", false)
+	ExchangeEmailItemPath1 = ExchangeEmailBasePath.MustAppend(ItemName1, true)
+	ExchangeEmailItemPath2 = ExchangeEmailBasePath2.MustAppend(ItemName2, true)
+	ExchangeEmailItemPath3 = ExchangeEmailBasePath3.MustAppend(ItemName3, true)
 
 	ExchangeEmailItems = []details.Entry{
 		{
@@ -151,7 +151,7 @@ var (
 			ShortRef:    ExchangeEmailItemPath1.RR.ShortRef(),
 			ParentRef:   ExchangeEmailItemPath1.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     ExchangeEmailItemPath1.ItemLocation(),
-			LocationRef: ExchangeEmailItemPath1.loc.String(),
+			LocationRef: ExchangeEmailItemPath1.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				Exchange: &details.ExchangeInfo{
 					ItemType: details.ExchangeMail,
@@ -166,7 +166,7 @@ var (
 			ShortRef:    ExchangeEmailItemPath2.RR.ShortRef(),
 			ParentRef:   ExchangeEmailItemPath2.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     ExchangeEmailItemPath2.ItemLocation(),
-			LocationRef: ExchangeEmailItemPath2.loc.String(),
+			LocationRef: ExchangeEmailItemPath2.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				Exchange: &details.ExchangeInfo{
 					ItemType: details.ExchangeMail,
@@ -181,7 +181,7 @@ var (
 			ShortRef:    ExchangeEmailItemPath3.RR.ShortRef(),
 			ParentRef:   ExchangeEmailItemPath3.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     ExchangeEmailItemPath3.ItemLocation(),
-			LocationRef: ExchangeEmailItemPath3.loc.String(),
+			LocationRef: ExchangeEmailItemPath3.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				Exchange: &details.ExchangeInfo{
 					ItemType: details.ExchangeMail,
@@ -194,10 +194,10 @@ var (
 	}
 
 	ExchangeContactsRootPath  = mustPathRep("tenant-id/exchange/user-id/contacts/contacts", false)
-	ExchangeContactsBasePath  = ExchangeContactsRootPath.mustAppend("contacts", false)
-	ExchangeContactsBasePath2 = ExchangeContactsRootPath.mustAppend("morecontacts", false)
-	ExchangeContactsItemPath1 = ExchangeContactsBasePath.mustAppend(ItemName1, true)
-	ExchangeContactsItemPath2 = ExchangeContactsBasePath2.mustAppend(ItemName2, true)
+	ExchangeContactsBasePath  = ExchangeContactsRootPath.MustAppend("contacts", false)
+	ExchangeContactsBasePath2 = ExchangeContactsRootPath.MustAppend("morecontacts", false)
+	ExchangeContactsItemPath1 = ExchangeContactsBasePath.MustAppend(ItemName1, true)
+	ExchangeContactsItemPath2 = ExchangeContactsBasePath2.MustAppend(ItemName2, true)
 
 	ExchangeContactsItems = []details.Entry{
 		{
@@ -205,7 +205,7 @@ var (
 			ShortRef:    ExchangeContactsItemPath1.RR.ShortRef(),
 			ParentRef:   ExchangeContactsItemPath1.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     ExchangeContactsItemPath1.ItemLocation(),
-			LocationRef: ExchangeContactsItemPath1.loc.String(),
+			LocationRef: ExchangeContactsItemPath1.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				Exchange: &details.ExchangeInfo{
 					ItemType: details.ExchangeContact,
@@ -218,7 +218,7 @@ var (
 			ShortRef:    ExchangeContactsItemPath2.RR.ShortRef(),
 			ParentRef:   ExchangeContactsItemPath2.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     ExchangeContactsItemPath2.ItemLocation(),
-			LocationRef: ExchangeContactsItemPath2.loc.String(),
+			LocationRef: ExchangeContactsItemPath2.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				Exchange: &details.ExchangeInfo{
 					ItemType: details.ExchangeContact,
@@ -228,11 +228,10 @@ var (
 		},
 	}
 
-	ExchangeEventsRootPath  = mustPathRep("tenant-id/exchange/user-id/events/holidays", false)
-	ExchangeEventsBasePath  = ExchangeEventsRootPath.mustAppend("holidays", false)
-	ExchangeEventsBasePath2 = ExchangeEventsRootPath.mustAppend("moreholidays", false)
-	ExchangeEventsItemPath1 = ExchangeEventsBasePath.mustAppend(ItemName1, true)
-	ExchangeEventsItemPath2 = ExchangeEventsBasePath2.mustAppend(ItemName2, true)
+	ExchangeEventsBasePath  = mustPathRep("tenant-id/exchange/user-id/events/holidays", false)
+	ExchangeEventsBasePath2 = mustPathRep("tenant-id/exchange/user-id/events/moreholidays", false)
+	ExchangeEventsItemPath1 = ExchangeEventsBasePath.MustAppend(ItemName1, true)
+	ExchangeEventsItemPath2 = ExchangeEventsBasePath2.MustAppend(ItemName2, true)
 
 	ExchangeEventsItems = []details.Entry{
 		{
@@ -240,7 +239,7 @@ var (
 			ShortRef:    ExchangeEventsItemPath1.RR.ShortRef(),
 			ParentRef:   ExchangeEventsItemPath1.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     ExchangeEventsItemPath1.ItemLocation(),
-			LocationRef: ExchangeEventsItemPath1.loc.String(),
+			LocationRef: ExchangeEventsItemPath1.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				Exchange: &details.ExchangeInfo{
 					ItemType: details.ExchangeEvent,
@@ -256,7 +255,7 @@ var (
 			ShortRef:    ExchangeEventsItemPath2.RR.ShortRef(),
 			ParentRef:   ExchangeEventsItemPath2.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     ExchangeEventsItemPath2.ItemLocation(),
-			LocationRef: ExchangeEventsItemPath2.loc.String(),
+			LocationRef: ExchangeEventsItemPath2.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				Exchange: &details.ExchangeInfo{
 					ItemType: details.ExchangeEvent,
@@ -270,17 +269,17 @@ var (
 	}
 
 	OneDriveRootPath   = mustPathRep("tenant-id/onedrive/user-id/files/drives/foo/root:", false)
-	OneDriveFolderPath = OneDriveRootPath.mustAppend("folder", false)
-	OneDriveBasePath1  = OneDriveFolderPath.mustAppend("a", false)
-	OneDriveBasePath2  = OneDriveFolderPath.mustAppend("b", false)
+	OneDriveFolderPath = OneDriveRootPath.MustAppend("folder", false)
+	OneDriveBasePath1  = OneDriveFolderPath.MustAppend("a", false)
+	OneDriveBasePath2  = OneDriveFolderPath.MustAppend("b", false)
 
-	OneDriveItemPath1 = OneDriveFolderPath.mustAppend(ItemName1, true)
-	OneDriveItemPath2 = OneDriveBasePath1.mustAppend(ItemName2, true)
-	OneDriveItemPath3 = OneDriveBasePath2.mustAppend(ItemName3, true)
+	OneDriveItemPath1 = OneDriveFolderPath.MustAppend(ItemName1, true)
+	OneDriveItemPath2 = OneDriveBasePath1.MustAppend(ItemName2, true)
+	OneDriveItemPath3 = OneDriveBasePath2.MustAppend(ItemName3, true)
 
-	OneDriveFolderFolder  = OneDriveFolderPath.loc.PopFront().String()
-	OneDriveParentFolder1 = OneDriveBasePath1.loc.PopFront().String()
-	OneDriveParentFolder2 = OneDriveBasePath2.loc.PopFront().String()
+	OneDriveFolderFolder  = OneDriveFolderPath.Loc.PopFront().String()
+	OneDriveParentFolder1 = OneDriveBasePath1.Loc.PopFront().String()
+	OneDriveParentFolder2 = OneDriveBasePath2.Loc.PopFront().String()
 
 	OneDriveItems = []details.Entry{
 		{
@@ -288,7 +287,7 @@ var (
 			ShortRef:    OneDriveItemPath1.RR.ShortRef(),
 			ParentRef:   OneDriveItemPath1.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     OneDriveItemPath1.ItemLocation(),
-			LocationRef: OneDriveItemPath1.loc.String(),
+			LocationRef: OneDriveItemPath1.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				OneDrive: &details.OneDriveInfo{
 					ItemType: details.OneDriveItem,
@@ -306,7 +305,7 @@ var (
 			ShortRef:    OneDriveItemPath2.RR.ShortRef(),
 			ParentRef:   OneDriveItemPath2.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     OneDriveItemPath2.ItemLocation(),
-			LocationRef: OneDriveItemPath2.loc.String(),
+			LocationRef: OneDriveItemPath2.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				OneDrive: &details.OneDriveInfo{
 					ItemType: details.OneDriveItem,
@@ -324,7 +323,7 @@ var (
 			ShortRef:    OneDriveItemPath3.RR.ShortRef(),
 			ParentRef:   OneDriveItemPath3.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     OneDriveItemPath3.ItemLocation(),
-			LocationRef: OneDriveItemPath3.loc.String(),
+			LocationRef: OneDriveItemPath3.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				OneDrive: &details.OneDriveInfo{
 					ItemType: details.OneDriveItem,
@@ -340,17 +339,17 @@ var (
 	}
 
 	SharePointRootPath    = mustPathRep("tenant-id/sharepoint/site-id/libraries/drives/foo/root:", false)
-	SharePointLibraryPath = SharePointRootPath.mustAppend("library", false)
-	SharePointBasePath1   = SharePointLibraryPath.mustAppend("a", false)
-	SharePointBasePath2   = SharePointLibraryPath.mustAppend("b", false)
+	SharePointLibraryPath = SharePointRootPath.MustAppend("library", false)
+	SharePointBasePath1   = SharePointLibraryPath.MustAppend("a", false)
+	SharePointBasePath2   = SharePointLibraryPath.MustAppend("b", false)
 
-	SharePointLibraryItemPath1 = SharePointLibraryPath.mustAppend(ItemName1, true)
-	SharePointLibraryItemPath2 = SharePointBasePath1.mustAppend(ItemName2, true)
-	SharePointLibraryItemPath3 = SharePointBasePath2.mustAppend(ItemName3, true)
+	SharePointLibraryItemPath1 = SharePointLibraryPath.MustAppend(ItemName1, true)
+	SharePointLibraryItemPath2 = SharePointBasePath1.MustAppend(ItemName2, true)
+	SharePointLibraryItemPath3 = SharePointBasePath2.MustAppend(ItemName3, true)
 
-	SharePointLibraryFolder  = SharePointLibraryPath.loc.PopFront().String()
-	SharePointParentLibrary1 = SharePointBasePath1.loc.PopFront().String()
-	SharePointParentLibrary2 = SharePointBasePath2.loc.PopFront().String()
+	SharePointLibraryFolder  = SharePointLibraryPath.Loc.PopFront().String()
+	SharePointParentLibrary1 = SharePointBasePath1.Loc.PopFront().String()
+	SharePointParentLibrary2 = SharePointBasePath2.Loc.PopFront().String()
 
 	SharePointLibraryItems = []details.Entry{
 		{
@@ -358,7 +357,7 @@ var (
 			ShortRef:    SharePointLibraryItemPath1.RR.ShortRef(),
 			ParentRef:   SharePointLibraryItemPath1.RR.ToBuilder().Dir().ShortRef(),
 			ItemRef:     SharePointLibraryItemPath1.ItemLocation(),
-			LocationRef: SharePointLibraryItemPath1.loc.String(),
+			LocationRef: SharePointLibraryItemPath1.Loc.String(),
 			ItemInfo: details.ItemInfo{
 				SharePoint: &details.SharePointInfo{
 					ItemType: details.SharePointLibrary,
@@ -376,7 +375,7 @@ var (
 			ShortRef:    SharePointLibraryItemPath2.RR.ShortRef(),
|
ShortRef: SharePointLibraryItemPath2.RR.ShortRef(),
|
||||||
ParentRef: SharePointLibraryItemPath2.RR.ToBuilder().Dir().ShortRef(),
|
ParentRef: SharePointLibraryItemPath2.RR.ToBuilder().Dir().ShortRef(),
|
||||||
ItemRef: SharePointLibraryItemPath2.ItemLocation(),
|
ItemRef: SharePointLibraryItemPath2.ItemLocation(),
|
||||||
LocationRef: SharePointLibraryItemPath2.loc.String(),
|
LocationRef: SharePointLibraryItemPath2.Loc.String(),
|
||||||
ItemInfo: details.ItemInfo{
|
ItemInfo: details.ItemInfo{
|
||||||
SharePoint: &details.SharePointInfo{
|
SharePoint: &details.SharePointInfo{
|
||||||
ItemType: details.SharePointLibrary,
|
ItemType: details.SharePointLibrary,
|
||||||
@ -394,7 +393,7 @@ var (
|
|||||||
ShortRef: SharePointLibraryItemPath3.RR.ShortRef(),
|
ShortRef: SharePointLibraryItemPath3.RR.ShortRef(),
|
||||||
ParentRef: SharePointLibraryItemPath3.RR.ToBuilder().Dir().ShortRef(),
|
ParentRef: SharePointLibraryItemPath3.RR.ToBuilder().Dir().ShortRef(),
|
||||||
ItemRef: SharePointLibraryItemPath3.ItemLocation(),
|
ItemRef: SharePointLibraryItemPath3.ItemLocation(),
|
||||||
LocationRef: SharePointLibraryItemPath3.loc.String(),
|
LocationRef: SharePointLibraryItemPath3.Loc.String(),
|
||||||
ItemInfo: details.ItemInfo{
|
ItemInfo: details.ItemInfo{
|
||||||
SharePoint: &details.SharePointInfo{
|
SharePoint: &details.SharePointInfo{
|
||||||
ItemType: details.SharePointLibrary,
|
ItemType: details.SharePointLibrary,
|
||||||
|
|||||||
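The loc to Loc and mustAppend to MustAppend renames above export these testdata members so that other packages can reach them. A minimal standalone sketch of the Go visibility rule involved; the type and names here are illustrative, not the repo's:

package main

import "fmt"

// pathRep mimics the shape of the testdata values above: an exported
// field (Loc) and an exported method (MustAppend) are reachable from
// other packages, while lowercase loc/mustAppend would not be.
type pathRep struct {
	Loc []string // exported field: usable as rep.Loc elsewhere
}

func (p pathRep) MustAppend(elem string) pathRep {
	p.Loc = append(p.Loc, elem) // exported method: callable cross-package
	return p
}

func main() {
	rep := pathRep{Loc: []string{"root:"}}.MustAppend("folder")
	fmt.Println(rep.Loc) // [root: folder]
}
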
@@ -34,20 +34,20 @@ const (

 // flag names
 const (
 	DebugAPIFN     = "debug-api-calls"
 	LogFileFN      = "log-file"
 	LogLevelFN     = "log-level"
 	ReadableLogsFN = "readable-logs"
-	SensitiveInfoFN = "sensitive-info"
+	MaskSensitiveDataFN = "mask-sensitive-data"
 )

 // flag values
 var (
 	DebugAPIFV     bool
 	logFileFV      = ""
 	LogLevelFV     = "info"
 	ReadableLogsFV bool
-	SensitiveInfoFV = PIIPlainText
+	MaskSensitiveDataFV bool

 	LogFile string // logFileFV after processing
 )
@@ -83,9 +83,6 @@ func AddLoggingFlags(cmd *cobra.Command) {

 	//nolint:errcheck
 	fs.MarkHidden(ReadableLogsFN)
-	// TODO(keepers): unhide when we have sufficient/complete coverage of PII handling
-	//nolint:errcheck
-	fs.MarkHidden(SensitiveInfoFN)
 }

 // internal deduplication for adding flags
@@ -106,11 +103,11 @@ func addFlags(fs *pflag.FlagSet, defaultFile string) {
 		false,
 		"minimizes log output for console readability: removes the file and date, colors the level")

-	fs.StringVar(
-		&SensitiveInfoFV,
-		SensitiveInfoFN,
-		PIIPlainText,
-		fmt.Sprintf("set the format for sensitive info in logs to %s|%s|%s", PIIHash, PIIMask, PIIPlainText))
+	fs.BoolVar(
+		&MaskSensitiveDataFV,
+		MaskSensitiveDataFN,
+		false,
+		"anonymize personal data in log output")
 }

 // Settings records the user's preferred logging settings.
@@ -136,7 +133,7 @@ func PreloadLoggingFlags(args []string) Settings {
 	ls := Settings{
 		File:        "",
 		Level:       LogLevelFV,
-		PIIHandling: SensitiveInfoFV,
+		PIIHandling: PIIPlainText,
 	}

 	// parse the os args list to find the log level flag
@@ -144,6 +141,10 @@ func PreloadLoggingFlags(args []string) Settings {
 		return ls
 	}

+	if MaskSensitiveDataFV {
+		ls.PIIHandling = PIIHash
+	}
+
 	// retrieve the user's preferred log level
 	// automatically defaults to "info"
 	levelString, err := fs.GetString(LogLevelFN)
@@ -165,7 +166,7 @@ func PreloadLoggingFlags(args []string) Settings {

 	// retrieve the user's preferred PII handling algorithm
 	// automatically defaults to default log location
-	pii, err := fs.GetString(SensitiveInfoFN)
+	pii, err := fs.GetString(MaskSensitiveDataFN)
 	if err != nil {
 		return ls
 	}

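The hunks above replace the string-valued --sensitive-info flag with a bool --mask-sensitive-data flag, defaulting PIIHandling to plaintext and switching it to hashing when the flag is set. A standalone sketch of the same spf13/pflag registration pattern, separate from the repo's code:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var maskSensitiveData bool

	fs := pflag.NewFlagSet("logging", pflag.ContinueOnError)

	// Mirrors the fs.BoolVar registration in the diff: a presence-style
	// bool flag replaces the old string-valued sensitivity format flag.
	fs.BoolVar(
		&maskSensitiveData,
		"mask-sensitive-data",
		false,
		"anonymize personal data in log output")

	// Passing the bare flag sets it to true; omitting it keeps the default.
	_ = fs.Parse([]string{"--mask-sensitive-data"})

	fmt.Println(maskSensitiveData) // true
}
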
@@ -33,7 +33,7 @@ func (suite *LoggerUnitSuite) TestAddLoggingFlags() {
 	assert.True(t, logger.DebugAPIFV, logger.DebugAPIFN)
 	assert.True(t, logger.ReadableLogsFV, logger.ReadableLogsFN)
 	assert.Equal(t, logger.LLError, logger.LogLevelFV, logger.LogLevelFN)
-	assert.Equal(t, logger.PIIMask, logger.SensitiveInfoFV, logger.SensitiveInfoFN)
+	assert.True(t, logger.MaskSensitiveDataFV, logger.MaskSensitiveDataFN)
 	// empty assertion here, instead of matching "log-file", because the LogFile
 	// var isn't updated by running the command (this is expected and correct),
 	// while the logFileFV remains unexported.
@@ -50,7 +50,7 @@ func (suite *LoggerUnitSuite) TestAddLoggingFlags() {
 		"--" + logger.LogFileFN, "log-file",
 		"--" + logger.LogLevelFN, logger.LLError,
 		"--" + logger.ReadableLogsFN,
-		"--" + logger.SensitiveInfoFN, logger.PIIMask,
+		"--" + logger.MaskSensitiveDataFN,
 	})

 	err := cmd.Execute()
@@ -68,7 +68,7 @@ func (suite *LoggerUnitSuite) TestPreloadLoggingFlags() {
 		"--" + logger.LogFileFN, "log-file",
 		"--" + logger.LogLevelFN, logger.LLError,
 		"--" + logger.ReadableLogsFN,
-		"--" + logger.SensitiveInfoFN, logger.PIIMask,
+		"--" + logger.MaskSensitiveDataFN,
 	}

 	settings := logger.PreloadLoggingFlags(args)
@@ -77,5 +77,5 @@ func (suite *LoggerUnitSuite) TestPreloadLoggingFlags() {
 	assert.True(t, logger.ReadableLogsFV, logger.ReadableLogsFN)
 	assert.Equal(t, "log-file", settings.File, "settings.File")
 	assert.Equal(t, logger.LLError, settings.Level, "settings.Level")
-	assert.Equal(t, logger.PIIMask, settings.PIIHandling, "settings.PIIHandling")
+	assert.Equal(t, logger.PIIHash, settings.PIIHandling, "settings.PIIHandling")
 }

@@ -38,3 +38,13 @@ func GetDriveFolderPath(p Path) (string, error) {

 	return Builder{}.Append(drivePath.Folders...).String(), nil
 }
+
+// BuildDriveLocation takes a driveID and a set of unescaped element names,
+// including the root folder, and returns a *path.Builder containing the
+// canonical path representation for the drive path.
+func BuildDriveLocation(
+	driveID string,
+	unescapedElements ...string,
+) *Builder {
+	return Builder{}.Append("drives", driveID).Append(unescapedElements...)
+}

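A possible usage sketch for the new BuildDriveLocation helper; the printed output is inferred from the TestFormatDriveFolders cases added later in this diff, not from separate documentation:

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/path"
)

func main() {
	// Elements are passed unescaped; String() escapes as needed
	// (e.g. "foo/" renders as foo\/ per the test that follows).
	pb := path.BuildDriveLocation("some-drive-id", "root:", "foo", "bar")
	fmt.Println(pb.String()) // drives/some-drive-id/root:/foo/bar
}
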
@@ -1,6 +1,7 @@
 package path_test

 import (
+	"strings"
 	"testing"

 	"github.com/alcionai/clues"
@@ -8,6 +9,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

+	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/path"
 )
@@ -21,8 +23,6 @@ func TestOneDrivePathSuite(t *testing.T) {
 }

 func (suite *OneDrivePathSuite) Test_ToOneDrivePath() {
-	const root = "root:"
-
 	tests := []struct {
 		name         string
 		pathElements []string
@@ -31,20 +31,28 @@ func (suite *OneDrivePathSuite) Test_ToOneDrivePath() {
 	}{
 		{
 			name:         "Not enough path elements",
-			pathElements: []string{"drive", "driveID"},
+			pathElements: []string{odConsts.DrivesPathDir, "driveID"},
 			errCheck:     assert.Error,
 		},
 		{
 			name:         "Root path",
-			pathElements: []string{"drive", "driveID", root},
-			expected:     &path.DrivePath{DriveID: "driveID", Root: root, Folders: []string{}},
-			errCheck:     assert.NoError,
+			pathElements: []string{odConsts.DrivesPathDir, "driveID", odConsts.RootPathDir},
+			expected: &path.DrivePath{
+				DriveID: "driveID",
+				Root:    odConsts.RootPathDir,
+				Folders: []string{},
+			},
+			errCheck: assert.NoError,
 		},
 		{
 			name:         "Deeper path",
-			pathElements: []string{"drive", "driveID", root, "folder1", "folder2"},
-			expected:     &path.DrivePath{DriveID: "driveID", Root: root, Folders: []string{"folder1", "folder2"}},
-			errCheck:     assert.NoError,
+			pathElements: []string{odConsts.DrivesPathDir, "driveID", odConsts.RootPathDir, "folder1", "folder2"},
+			expected: &path.DrivePath{
+				DriveID: "driveID",
+				Root:    odConsts.RootPathDir,
+				Folders: []string{"folder1", "folder2"},
+			},
+			errCheck: assert.NoError,
 		},
 	}
 	for _, tt := range tests {
@@ -63,3 +71,49 @@ func (suite *OneDrivePathSuite) Test_ToOneDrivePath() {
 		})
 	}
 }
+
+func (suite *OneDrivePathSuite) TestFormatDriveFolders() {
+	const (
+		driveID     = "some-drive-id"
+		drivePrefix = "drives/" + driveID
+	)
+
+	table := []struct {
+		name     string
+		input    []string
+		expected string
+	}{
+		{
+			name: "normal",
+			input: []string{
+				"root:",
+				"foo",
+				"bar",
+			},
+			expected: strings.Join(
+				append([]string{drivePrefix}, "root:", "foo", "bar"),
+				"/"),
+		},
+		{
+			name: "has character that would be escaped",
+			input: []string{
+				"root:",
+				"foo/",
+				"bar",
+			},
+			// Element "foo/" should end up escaped in the string output.
+			expected: strings.Join(
+				append([]string{drivePrefix}, "root:", `foo\/`, "bar"),
+				"/"),
+		},
+	}
+
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			assert.Equal(
+				suite.T(),
+				test.expected,
+				path.BuildDriveLocation(driveID, test.input...).String())
+		})
+	}
+}

@@ -86,3 +86,12 @@ func (el Elements) String() string {
 func (el Elements) PlainString() string {
 	return join(el)
 }
+
+// Last returns the last element. Returns "" if empty.
+func (el Elements) Last() string {
+	if len(el) == 0 {
+		return ""
+	}
+
+	return el[len(el)-1]
+}

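A brief usage sketch for the new Last method, assuming Elements remains a slice-of-strings type as its other methods suggest:

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/path"
)

func main() {
	// Last returns the final element, or "" for an empty Elements.
	fmt.Println(path.Elements{"a", "b", "c"}.Last()) // c
	fmt.Println(path.Elements{}.Last())              // ""
}
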
@@ -85,7 +85,7 @@ type Path interface {
 	Category() CategoryType
 	Tenant() string
 	ResourceOwner() string
-	Folder(bool) string
+	Folder(escaped bool) string
 	Folders() Elements
 	Item() string
 	// UpdateParent updates parent from old to new if the item/folder was
@@ -106,7 +106,9 @@ type Path interface {
 	// Append returns a new Path object with the given element added to the end of
 	// the old Path if possible. If the old Path is an item Path then Append
 	// returns an error.
-	Append(element string, isItem bool) (Path, error)
+	Append(isItem bool, elems ...string) (Path, error)
+	// AppendItem is a shorthand for Append(true, someItem)
+	AppendItem(item string) (Path, error)
 	// ShortRef returns a short reference representing this path. The short
 	// reference is guaranteed to be unique. No guarantees are made about whether
 	// a short reference can be converted back into the Path that generated it.
@@ -130,6 +132,13 @@ var (
 	_ fmt.Stringer = &Builder{}
 )

+// RestorePaths denotes the location to find an item in kopia and the path of
+// the collection to place the item in for restore.
+type RestorePaths struct {
+	StoragePath Path
+	RestorePath Path
+}
+
 // Builder is a simple path representation that only tracks path elements. It
 // can join, escape, and unescape elements. Higher-level packages are expected
 // to wrap this struct to build resource-specific contexts (e.x. an

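A hedged sketch of the reshaped Append call and its AppendItem shorthand; the arguments are modeled on the TestAppendItem case added just below, and the program only illustrates the new call pattern:

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/path"
)

func main() {
	// Build a folder path, mirroring the TestAppendItem setup below.
	p, err := path.Build("t", "ro", path.ExchangeService, path.EmailCategory, false, "foo", "bar")
	if err != nil {
		panic(err)
	}

	// The variadic Append now takes isItem first, then any number of elements.
	p, err = p.Append(false, "baz", "qux")
	if err != nil {
		panic(err)
	}

	// AppendItem is the shorthand for Append(true, item); appending to the
	// resulting item path afterwards returns an error.
	p, err = p.AppendItem("item-id")
	if err != nil {
		panic(err)
	}

	fmt.Println(p.String())
}
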
@@ -245,6 +245,26 @@ func (suite *PathUnitSuite) TestAppend() {
 	}
 }

+func (suite *PathUnitSuite) TestAppendItem() {
+	t := suite.T()
+
+	p, err := Build("t", "ro", ExchangeService, EmailCategory, false, "foo", "bar")
+	require.NoError(t, err, clues.ToCore(err))
+
+	pb := p.ToBuilder()
+	assert.Equal(t, pb.String(), p.String())
+
+	pb = pb.Append("qux")
+
+	p, err = p.AppendItem("qux")
+
+	require.NoError(t, err, clues.ToCore(err))
+	assert.Equal(t, pb.String(), p.String())
+
+	_, err = p.AppendItem("fnords")
+	require.Error(t, err, clues.ToCore(err))
+}
+
 func (suite *PathUnitSuite) TestUnescapeAndAppend() {
 	table := append(append([]testData{}, genericCases...), basicEscapedInputs...)
 	for _, test := range table {

@@ -253,21 +253,25 @@ func (rp dataLayerResourcePath) Dir() (Path, error) {
 }

 func (rp dataLayerResourcePath) Append(
-	element string,
 	isItem bool,
+	elems ...string,
 ) (Path, error) {
 	if rp.hasItem {
 		return nil, clues.New("appending to an item path")
 	}

 	return &dataLayerResourcePath{
-		Builder:  *rp.Builder.Append(element),
+		Builder:  *rp.Builder.Append(elems...),
 		service:  rp.service,
 		category: rp.category,
 		hasItem:  isItem,
 	}, nil
 }

+func (rp dataLayerResourcePath) AppendItem(item string) (Path, error) {
+	return rp.Append(true, item)
+}
+
 func (rp dataLayerResourcePath) ToBuilder() *Builder {
 	// Safe to directly return the Builder because Builders are immutable.
 	return &rp.Builder

@@ -547,7 +547,7 @@ func (suite *PopulatedDataLayerResourcePath) TestAppend() {
 		suite.Run(test.name, func() {
 			t := suite.T()

-			newPath, err := suite.paths[m.isItem].Append(newElement, test.hasItem)
+			newPath, err := suite.paths[m.isItem].Append(test.hasItem, newElement)

 			// Items don't allow appending.
 			if m.isItem {

@@ -24,6 +24,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
+	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
 	"github.com/alcionai/corso/src/pkg/storage"
 )

@@ -150,7 +151,7 @@ func runRestoreLoadTest(
 		t.Skip("restore load test is toggled off")
 	}

-	dest := tester.DefaultTestRestoreDestination()
+	dest := tester.DefaultTestRestoreDestination("")

 	rst, err := r.NewRestore(ctx, backupID, restSel, dest)
 	require.NoError(t, err, clues.ToCore(err))
@@ -541,7 +542,7 @@ func (suite *LoadOneDriveSuite) TestOneDrive() {
 	defer flush()

 	bsel := selectors.NewOneDriveBackup(suite.usersUnderTest)
-	bsel.Include(bsel.AllData())
+	bsel.Include(selTD.OneDriveBackupFolderScope(bsel))
 	sel := bsel.Selector

 	runLoadTest(
@@ -588,7 +589,7 @@ func (suite *IndividualLoadOneDriveSuite) TestOneDrive() {
 	defer flush()

 	bsel := selectors.NewOneDriveBackup(suite.usersUnderTest)
-	bsel.Include(bsel.AllData())
+	bsel.Include(selTD.OneDriveBackupFolderScope(bsel))
 	sel := bsel.Selector

 	runLoadTest(

@@ -1,7 +1,9 @@
 package repository_test

 import (
+	"os"
 	"testing"
+	"time"

 	"github.com/alcionai/clues"
 	"github.com/stretchr/testify/assert"
@@ -145,6 +147,33 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() {
 	}
 }

+const (
+	roleARNEnvKey = "CORSO_TEST_S3_ROLE"
+	roleDuration  = time.Minute * 20
+)
+
+func (suite *RepositoryIntegrationSuite) TestInitializeWithRole() {
+	if _, ok := os.LookupEnv(roleARNEnvKey); !ok {
+		suite.T().Skip(roleARNEnvKey + " not set")
+	}
+
+	ctx, flush := tester.NewContext()
+	defer flush()
+
+	st := tester.NewPrefixedS3Storage(suite.T())
+
+	st.Role = os.Getenv(roleARNEnvKey)
+	st.SessionName = "corso-repository-test"
+	st.SessionDuration = roleDuration.String()
+
+	r, err := repository.Initialize(ctx, account.Account{}, st, control.Options{})
+	require.NoError(suite.T(), err)
+
+	defer func() {
+		r.Close(ctx)
+	}()
+}
+
 func (suite *RepositoryIntegrationSuite) TestConnect() {
 	ctx, flush := tester.NewContext()
 	defer flush()
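
Since SessionDuration is assigned roleDuration.String() in the added test, the 20-minute role duration is stored in Go's duration string form. A quick standalone illustration of that serialization:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Matches roleDuration in the test above: 20 * time.Minute.
	d := time.Minute * 20
	fmt.Println(d.String()) // 20m0s
}
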
@@ -213,7 +242,7 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() {
 	t := suite.T()

 	acct := tester.NewM365Account(t)
-	dest := tester.DefaultTestRestoreDestination()
+	dest := tester.DefaultTestRestoreDestination("")

 	// need to initialize the repository before we can test connecting to it.
 	st := tester.NewPrefixedS3Storage(t)

@@ -10,6 +10,7 @@ import (
 	"github.com/stretchr/testify/suite"

 	"github.com/alcionai/corso/src/internal/common/dttm"
+	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
@@ -315,7 +316,7 @@ func (suite *OneDriveSelectorSuite) TestOneDriveCategory_PathValues() {
 	fileName := "file"
 	fileID := fileName + "-id"
 	shortRef := "short"
-	elems := []string{"drive", "driveID", "root:", "dir1.d", "dir2.d", fileID}
+	elems := []string{odConsts.DrivesPathDir, "driveID", odConsts.RootPathDir, "dir1.d", "dir2.d", fileID}

 	filePath, err := path.Build("tenant", "user", path.OneDriveService, path.FilesCategory, true, elems...)
 	require.NoError(t, err, clues.ToCore(err))