Merge branch 'main' into InformOnCleanupFailure
commit 423d1d900f
16  .github/actions/backup-restore-test/action.yml  vendored
@@ -50,9 +50,11 @@ runs:
       run: |
         set -euo pipefail
         ./corso restore '${{ inputs.service }}' \
-          --no-stats --hide-progress \
+          --no-stats \
+          --hide-progress \
           ${{ inputs.restore-args }} \
-          --backup '${{ steps.backup.outputs.result }}' 2>&1 |
+          --backup '${{ steps.backup.outputs.result }}' \
+          2>&1 |
           tee /tmp/corsologs |
           grep -i -e 'Restoring to folder ' |
           sed "s/Restoring to folder /result=/" |
@@ -77,7 +79,9 @@ runs:
       run: |
         set -euo pipefail
         ./corso backup list ${{ inputs.service }} \
-          --no-stats --hide-progress 2>&1 |
+          --no-stats \
+          --hide-progress \
+          2>&1 |
           tee /tmp/corso-backup-list.log

         if ! grep -q ${{ steps.backup.outputs.result }} /tmp/corso-backup-list.log
@@ -92,8 +96,10 @@ runs:
       run: |
         set -euo pipefail
         ./corso backup list ${{ inputs.service }} \
-          --no-stats --hide-progress \
-          --backup "${{ steps.backup.outputs.result }}" 2>&1 |
+          --no-stats \
+          --hide-progress \
+          --backup "${{ steps.backup.outputs.result }}" \
+          2>&1 |
           tee /tmp/corso-backup-list-item.log

         if ! grep -q ${{ steps.backup.outputs.result }} /tmp/corso-backup-list-item.log
132  .github/workflows/sanity-test.yaml  vendored
@@ -27,20 +27,20 @@ jobs:
     environment: Testing
     runs-on: ubuntu-latest
     env:
       # Need these in the local env so that corso can read them
       AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY_SECRET }}
+      AZURE_CLIENT_ID: ${{ secrets[needs.SetM365App.outputs.client_id_env] }}
+      AZURE_CLIENT_SECRET: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }}
+      AZURE_TENANT_ID: ${{ secrets.TENANT_ID }}
-      CORSO_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }}
-      CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
       # re-used values
-      # don't forget: return to Corso_Test_Sanity_
       CORSO_LOG_DIR: testlog
       CORSO_LOG_FILE: testlog/testlogging.log
+      CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
       RESTORE_DEST_PFX: Corso_Test_Sanity_
       TEST_RESULT: test_results
       TEST_USER: ${{ github.event.inputs.user != '' && github.event.inputs.user || secrets.CORSO_M365_TEST_USER_ID }}
-      TEST_SITE: ${{ secrets.CORSO_M365_TEST_SITE_URL }}
       SECONDARY_TEST_USER : ${{ secrets.CORSO_SECONDARY_M365_TEST_USER_ID }}
       # The default working directory doesn't seem to apply to things without
       # the 'run' directive. https://stackoverflow.com/a/67845456
       WORKING_DIR: src
@@ -87,9 +87,9 @@ jobs:
         user: ${{ env.TEST_USER }}
         folder-prefix: ${{ env.RESTORE_DEST_PFX }}
         older-than: ${{ env.NOW }}
-        azure-client-id: ${{ env.AZURE_CLIENT_ID }}
-        azure-client-secret: ${{ env.AZURE_CLIENT_SECRET }}
-        azure-tenant-id: ${{ env.AZURE_TENANT_ID }}
+        azure-client-id: ${{ secrets[needs.SetM365App.outputs.client_id_env] }}
+        azure-client-secret: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }}
+        azure-tenant-id: ${{ secrets.TENANT_ID }}
         m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }}
         m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }}
@@ -97,13 +97,13 @@ jobs:

       if: always()
       uses: ./.github/actions/purge-m365-data
       with:
-        site: ${{ env.TEST_SITE }}
+        site: ${{ secrets.CORSO_M365_TEST_SITE_URL }}
         folder-prefix: ${{ env.RESTORE_DEST_PFX }}
         libraries: ${{ vars.CORSO_M365_TEST_SITE_LIBRARIES }}
         older-than: ${{ env.NOW }}
-        azure-client-id: ${{ env.AZURE_CLIENT_ID }}
-        azure-client-secret: ${{ env.AZURE_CLIENT_SECRET }}
-        azure-tenant-id: ${{ env.AZURE_TENANT_ID }}
+        azure-client-id: ${{ secrets[needs.SetM365App.outputs.client_id_env] }}
+        azure-client-secret: ${{ secrets[needs.SetM365App.outputs.client_secret_env] }}
+        azure-tenant-id: ${{ secrets.TENANT_ID }}
         m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }}
         m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }}
@@ -122,10 +122,13 @@ jobs:
       run: |
         set -euo pipefail
         prefix=$(date +"%Y-%m-%d-%T")
-        echo -e "\nRepo init test\n" >> ${CORSO_LOG_FILE}
+        echo -e "\nRepo init test\n" >> ${{ env.CORSO_LOG_FILE }}
         ./corso repo init s3 \
-          --no-stats --hide-progress --prefix $prefix \
-          --bucket ${CORSO_BUCKET} 2>&1 | tee $TEST_RESULT/initrepo.txt
+          --no-stats \
+          --hide-progress \
+          --prefix $prefix \
+          --bucket ${{ secrets.CI_TESTS_S3_BUCKET }} \
+          2>&1 | tee $TEST_RESULT/initrepo.txt

         if ! grep -q 'Initialized a S3 repository within bucket' $TEST_RESULT/initrepo.txt
         then
@@ -138,10 +141,13 @@ jobs:
     - name: Repo connect test
       run: |
         set -euo pipefail
-        echo -e "\nRepo connect test\n" >> ${CORSO_LOG_FILE}
+        echo -e "\nRepo connect test\n" >> ${{ env.CORSO_LOG_FILE }}
         ./corso repo connect s3 \
-          --no-stats --hide-progress --prefix ${{ steps.repo-init.outputs.result }} \
-          --bucket ${CORSO_BUCKET} 2>&1 | tee $TEST_RESULT/connect.txt
+          --no-stats \
+          --hide-progress \
+          --prefix ${{ steps.repo-init.outputs.result }} \
+          --bucket ${{ secrets.CI_TESTS_S3_BUCKET }} \
+          2>&1 | tee $TEST_RESULT/connect.txt

         if ! grep -q 'Connected to S3 bucket' $TEST_RESULT/connect.txt
         then
@@ -159,9 +165,9 @@ jobs:
       working-directory: ./src/cmd/factory
       run: |
         go run . exchange emails \
-          --user ${TEST_USER} \
-          --tenant ${AZURE_TENANT_ID} \
-          --destination ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }} \
+          --user ${{ env.TEST_USER }} \
+          --tenant ${{ secrets.TENANT_ID }} \
+          --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }} \
           --count 4

     - name: Exchange - Backup
@@ -170,9 +176,9 @@ jobs:
       with:
         service: exchange
         kind: backup
-        backup-args: '--mailbox "${TEST_USER}" --data "email"'
-        restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
+        backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"'
+        restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'

     - name: Exchange - Incremental backup
       id: exchange-backup-incremental
@@ -180,9 +186,9 @@ jobs:
       with:
         service: exchange
        kind: backup-incremental
-        backup-args: '--mailbox "${TEST_USER}" --data "email"'
-        restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
+        backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"'
+        restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
         base-backup: ${{ steps.exchange-backup.outputs.backup-id }}

     - name: Exchange - Non delta backup
@@ -191,9 +197,9 @@ jobs:
       with:
         service: exchange
         kind: backup-non-delta
-        backup-args: '--mailbox "${TEST_USER}" --data "email" --disable-delta'
-        restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
+        backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email" --disable-delta'
+        restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
         base-backup: ${{ steps.exchange-backup.outputs.backup-id }}

     - name: Exchange - Incremental backup after non-delta
@@ -202,9 +208,9 @@ jobs:
       with:
         service: exchange
         kind: backup-incremental-after-non-delta
-        backup-args: '--mailbox "${TEST_USER}" --data "email"'
-        restore-args: '--email-folder ${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.repo-init.outputs.result }}'
+        backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"'
+        restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
         base-backup: ${{ steps.exchange-backup.outputs.backup-id }}


@@ -220,10 +226,10 @@ jobs:
         suffix=$(date +"%Y-%m-%d_%H-%M-%S")

         go run . onedrive files \
-          --user ${TEST_USER} \
-          --secondaryuser ${SECONDARY_TEST_USER} \
-          --tenant ${AZURE_TENANT_ID} \
-          --destination ${RESTORE_DEST_PFX}$suffix \
+          --user ${{ env.TEST_USER }} \
+          --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
+          --tenant ${{ secrets.TENANT_ID }} \
+          --destination ${{ env.RESTORE_DEST_PFX }}$suffix \
           --count 4

         echo result="${suffix}" >> $GITHUB_OUTPUT
@@ -234,19 +240,19 @@ jobs:
       with:
         service: onedrive
         kind: backup
-        backup-args: '--user "${TEST_USER}"'
-        restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }}'
+        backup-args: '--user "${{ env.TEST_USER }}"'
+        restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'

     # generate some more enteries for incremental check
     - name: OneDrive - Create new data (for incremental)
       working-directory: ./src/cmd/factory
       run: |
         go run . onedrive files \
-          --user ${TEST_USER} \
-          --secondaryuser ${SECONDARY_TEST_USER} \
-          --tenant ${AZURE_TENANT_ID} \
-          --destination ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} \
+          --user ${{ env.TEST_USER }} \
+          --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
+          --tenant ${{ secrets.TENANT_ID }} \
+          --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }} \
           --count 4

     - name: OneDrive - Incremental backup
@@ -255,9 +261,9 @@ jobs:
       with:
         service: onedrive
         kind: incremental
-        backup-args: '--user "${TEST_USER}"'
-        restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-onedrive.outputs.result }}'
+        backup-args: '--user "${{ env.TEST_USER }}"'
+        restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }} --restore-permissions'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'

     ##########################################################################################################################################

@@ -271,11 +277,11 @@ jobs:
         suffix=$(date +"%Y-%m-%d_%H-%M-%S")

         go run . sharepoint files \
-          --site ${TEST_SITE} \
-          --user ${TEST_USER} \
-          --secondaryuser ${SECONDARY_TEST_USER} \
-          --tenant ${AZURE_TENANT_ID} \
-          --destination ${RESTORE_DEST_PFX}$suffix \
+          --site ${{ secrets.CORSO_M365_TEST_SITE_URL }} \
+          --user ${{ env.TEST_USER }} \
+          --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
+          --tenant ${{ secrets.TENANT_ID }} \
+          --destination ${{ env.RESTORE_DEST_PFX }}$suffix \
           --count 4

         echo result="${suffix}" >> $GITHUB_OUTPUT
@@ -286,20 +292,20 @@ jobs:
       with:
         service: sharepoint
         kind: backup
-        backup-args: '--site "${TEST_SITE}"'
-        restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }}'
+        backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"'
+        restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'

     # generate some more enteries for incremental check
     - name: SharePoint - Create new data (for incremental)
       working-directory: ./src/cmd/factory
       run: |
         go run . sharepoint files \
-          --site ${TEST_SITE} \
-          --user ${TEST_USER} \
-          --secondaryuser ${SECONDARY_TEST_USER} \
-          --tenant ${AZURE_TENANT_ID} \
-          --destination ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} \
+          --site ${{ secrets.CORSO_M365_TEST_SITE_URL }} \
+          --user ${{ env.TEST_USER }} \
+          --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
+          --tenant ${{ secrets.TENANT_ID }} \
+          --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }} \
           --count 4

     - name: SharePoint - Incremental backup
@@ -308,9 +314,9 @@ jobs:
       with:
         service: sharepoint
         kind: incremental
-        backup-args: '--site "${TEST_SITE}"'
-        restore-args: '--folder ${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions'
-        test-folder: '${RESTORE_DEST_PFX}${{ steps.new-data-creation-sharepoint.outputs.result }}'
+        backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"'
+        restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }} --restore-permissions'
+        test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'

     ##########################################################################################################################################

@@ -338,7 +344,7 @@ jobs:
     - name: Send Github Action failure to Slack
       id: slack-notification
       if: failure()
-      uses: slackapi/slack-github-action@v1.23.0
+      uses: slackapi/slack-github-action@v1.24.0
       with:
         payload: |
           {
@@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ### Added
+### Fixed
+- Fix Exchange folder cache population error when parent folder isn't found.

 ### Known Issues

 ## [v0.8.0] (beta) - 2023-05-15
@@ -1,6 +1,7 @@
 package backup

 import (
+	"fmt"
 	"testing"

 	"github.com/alcionai/clues"
@@ -13,6 +14,8 @@ import (
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/internal/version"
+	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 )

 type ExchangeUnitSuite struct {
@@ -275,18 +278,26 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectors() {
 	ctx, flush := tester.NewContext()
 	defer flush()

-	for _, test := range testdata.ExchangeOptionDetailLookups {
-		suite.Run(test.Name, func() {
-			t := suite.T()
+	for v := 0; v <= version.Backup; v++ {
+		suite.Run(fmt.Sprintf("version%d", v), func() {
+			for _, test := range testdata.ExchangeOptionDetailLookups {
+				suite.Run(test.Name, func() {
+					t := suite.T()

-			output, err := runDetailsExchangeCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts,
-				false)
-			assert.NoError(t, err, clues.ToCore(err))
-			assert.ElementsMatch(t, test.Expected, output.Entries)
+					bg := testdata.VersionedBackupGetter{
+						Details: dtd.GetDetailsSetForVersion(t, v),
+					}
+
+					output, err := runDetailsExchangeCmd(
+						ctx,
+						bg,
+						"backup-ID",
+						test.Opts(t, v),
+						false)
+					assert.NoError(t, err, clues.ToCore(err))
+					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
+				})
+			}
 		})
 	}
 }
@@ -303,7 +314,7 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
+				test.Opts(t, version.Backup),
 				false)
 			assert.Error(t, err, clues.ToCore(err))
 			assert.Empty(t, output)
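The same version-matrix restructuring is applied to the OneDrive and SharePoint suites below. A standalone sketch of the pattern, where backupVersion and the lookup names are illustrative stand-ins rather than values from the diff:

    package main

    import "fmt"

    // backupVersion stands in for version.Backup, the newest backup format
    // version; 3 is an arbitrary value for illustration.
    const backupVersion = 3

    func main() {
    	lookups := []string{"email", "contacts", "events"}

    	// The updated tests wrap every existing table case in an outer loop
    	// over all backup format versions, so each selector lookup runs once
    	// per details-format version instead of only against the latest.
    	for v := 0; v <= backupVersion; v++ {
    		for _, name := range lookups {
    			fmt.Printf("version%d/%s\n", v, name)
    		}
    	}
    }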
@@ -1,6 +1,7 @@
 package backup

 import (
+	"fmt"
 	"testing"

 	"github.com/alcionai/clues"
@@ -13,6 +14,8 @@ import (
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/internal/version"
+	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 )

 type OneDriveUnitSuite struct {
@@ -137,18 +140,26 @@ func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectors() {
 	ctx, flush := tester.NewContext()
 	defer flush()

-	for _, test := range testdata.OneDriveOptionDetailLookups {
-		suite.Run(test.Name, func() {
-			t := suite.T()
+	for v := 0; v <= version.Backup; v++ {
+		suite.Run(fmt.Sprintf("version%d", v), func() {
+			for _, test := range testdata.OneDriveOptionDetailLookups {
+				suite.Run(test.Name, func() {
+					t := suite.T()

-			output, err := runDetailsOneDriveCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts,
-				false)
-			assert.NoError(t, err, clues.ToCore(err))
-			assert.ElementsMatch(t, test.Expected, output.Entries)
+					bg := testdata.VersionedBackupGetter{
+						Details: dtd.GetDetailsSetForVersion(t, v),
+					}
+
+					output, err := runDetailsOneDriveCmd(
+						ctx,
+						bg,
+						"backup-ID",
+						test.Opts(t, v),
+						false)
+					assert.NoError(t, err, clues.ToCore(err))
+					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
+				})
+			}
 		})
 	}
 }
@@ -165,7 +176,7 @@ func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
+				test.Opts(t, version.Backup),
 				false)
 			assert.Error(t, err, clues.ToCore(err))
 			assert.Empty(t, output)
@@ -1,6 +1,7 @@
 package backup

 import (
+	"fmt"
 	"testing"

 	"github.com/alcionai/clues"
@@ -14,6 +15,8 @@ import (
 	"github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/internal/version"
+	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -256,18 +259,26 @@ func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectors() {
 	ctx, flush := tester.NewContext()
 	defer flush()

-	for _, test := range testdata.SharePointOptionDetailLookups {
-		suite.Run(test.Name, func() {
-			t := suite.T()
+	for v := 0; v <= version.Backup; v++ {
+		suite.Run(fmt.Sprintf("version%d", v), func() {
+			for _, test := range testdata.SharePointOptionDetailLookups {
+				suite.Run(test.Name, func() {
+					t := suite.T()

-			output, err := runDetailsSharePointCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts,
-				false)
-			assert.NoError(t, err, clues.ToCore(err))
-			assert.ElementsMatch(t, test.Expected, output.Entries)
+					bg := testdata.VersionedBackupGetter{
+						Details: dtd.GetDetailsSetForVersion(t, v),
+					}
+
+					output, err := runDetailsSharePointCmd(
+						ctx,
+						bg,
+						"backup-ID",
+						test.Opts(t, v),
+						false)
+					assert.NoError(t, err, clues.ToCore(err))
+					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
+				})
+			}
 		})
 	}
 }
@@ -284,7 +295,7 @@ func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectorsBadFormats
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
+				test.Opts(t, version.Backup),
 				false)
 			assert.Error(t, err, clues.ToCore(err))
 			assert.Empty(t, output)
@@ -6,7 +6,7 @@ import (
 	"github.com/alcionai/clues"
 	"github.com/spf13/viper"

-	"github.com/alcionai/corso/src/internal/common"
+	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/credentials"
 )
@@ -64,7 +64,7 @@ func configureAccount(

 	m365Cfg = account.M365Config{
 		M365: m365,
-		AzureTenantID: common.First(
+		AzureTenantID: str.First(
 			overrides[account.AzureTenantID],
 			m365Cfg.AzureTenantID,
 			os.Getenv(account.AzureTenantID)),
@@ -10,6 +10,7 @@ import (
 	"github.com/spf13/viper"

 	"github.com/alcionai/corso/src/internal/common"
+	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/pkg/credentials"
 	"github.com/alcionai/corso/src/pkg/storage"
 )
@@ -80,14 +81,14 @@ func configureStorage(
 	}

 	s3Cfg = storage.S3Config{
-		Bucket:   common.First(overrides[storage.Bucket], s3Cfg.Bucket, os.Getenv(storage.BucketKey)),
-		Endpoint: common.First(overrides[storage.Endpoint], s3Cfg.Endpoint, os.Getenv(storage.EndpointKey)),
-		Prefix:   common.First(overrides[storage.Prefix], s3Cfg.Prefix, os.Getenv(storage.PrefixKey)),
-		DoNotUseTLS: common.ParseBool(common.First(
+		Bucket:   str.First(overrides[storage.Bucket], s3Cfg.Bucket, os.Getenv(storage.BucketKey)),
+		Endpoint: str.First(overrides[storage.Endpoint], s3Cfg.Endpoint, os.Getenv(storage.EndpointKey)),
+		Prefix:   str.First(overrides[storage.Prefix], s3Cfg.Prefix, os.Getenv(storage.PrefixKey)),
+		DoNotUseTLS: str.ParseBool(str.First(
 			overrides[storage.DoNotUseTLS],
 			strconv.FormatBool(s3Cfg.DoNotUseTLS),
 			os.Getenv(storage.PrefixKey))),
-		DoNotVerifyTLS: common.ParseBool(common.First(
+		DoNotVerifyTLS: str.ParseBool(str.First(
 			overrides[storage.DoNotVerifyTLS],
 			strconv.FormatBool(s3Cfg.DoNotVerifyTLS),
 			os.Getenv(storage.PrefixKey))),
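configureStorage resolves each S3 setting by precedence: CLI override first, then the existing config value, then the environment. A standalone sketch of that rule; the local first below mirrors str.First from this diff, and the variable names are illustrative:

    package main

    import (
    	"fmt"
    	"os"
    )

    // first mirrors str.First: it returns the first non-empty string, which
    // gives CLI overrides precedence over config values, and config values
    // precedence over environment variables.
    func first(vs ...string) string {
    	for _, v := range vs {
    		if len(v) > 0 {
    			return v
    		}
    	}

    	return ""
    }

    func main() {
    	os.Setenv("BUCKET", "env-bucket")

    	override := ""         // no --bucket flag given
    	configured := "s3-cfg" // value read from the config file

    	fmt.Println(first(override, configured, os.Getenv("BUCKET"))) // s3-cfg
    }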
1022  src/cli/utils/testdata/opts.go  vendored
File diff suppressed because it is too large
@@ -11,10 +11,10 @@ import (
 	"github.com/google/uuid"

 	"github.com/alcionai/corso/src/cli/print"
-	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/internal/connector"
 	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
 	"github.com/alcionai/corso/src/internal/data"
@@ -116,7 +116,7 @@ func getGCAndVerifyResourceOwner(
 	idname.Provider,
 	error,
 ) {
-	tid := common.First(Tenant, os.Getenv(account.AzureTenantID))
+	tid := str.First(Tenant, os.Getenv(account.AzureTenantID))

 	if len(Tenant) == 0 {
 		Tenant = tid
@@ -15,7 +15,8 @@ import (
 	"github.com/spf13/cobra"

 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/common"
+	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/credentials"
@@ -54,7 +55,7 @@ func handleExchangeCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	tid := common.First(tenant, os.Getenv(account.AzureTenantID))
+	tid := str.First(tenant, os.Getenv(account.AzureTenantID))

 	ctx := clues.Add(
 		cmd.Context(),
@@ -111,9 +112,7 @@ func runDisplayM365JSON(
 		return err
 	}

-	str := string(bs)
-
-	err = sw.WriteStringValue("", &str)
+	err = sw.WriteStringValue("", ptr.To(string(bs)))
 	if err != nil {
 		return clues.Wrap(err, "Error writing string value: "+itemID)
 	}
@@ -19,8 +19,8 @@ import (

 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/credentials"
@@ -57,7 +57,7 @@ func handleOneDriveCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	tid := common.First(tenant, os.Getenv(account.AzureTenantID))
+	tid := str.First(tenant, os.Getenv(account.AzureTenantID))

 	ctx := clues.Add(
 		cmd.Context(),
@@ -11,8 +11,8 @@ import (

 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/common/dttm"
+	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/internal/connector"
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/onedrive"
@@ -263,7 +263,7 @@ func getGC(ctx context.Context) (account.Account, *connector.GraphConnector, err
 	// get account info
 	m365Cfg := account.M365Config{
 		M365:          credentials.GetM365(),
-		AzureTenantID: common.First(tenant, os.Getenv(account.AzureTenantID)),
+		AzureTenantID: str.First(tenant, os.Getenv(account.AzureTenantID)),
 	}

 	acct, err := account.NewAccount(account.ProviderM365, m365Cfg)
@@ -8,7 +8,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
 	github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.44.264
+	github.com/aws/aws-sdk-go v1.44.266
 	github.com/aws/aws-xray-sdk-go v1.8.1
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/google/uuid v1.3.0
@@ -19,7 +19,7 @@ require (
 	github.com/microsoft/kiota-http-go v1.0.0
 	github.com/microsoft/kiota-serialization-form-go v1.0.0
 	github.com/microsoft/kiota-serialization-json-go v1.0.0
-	github.com/microsoftgraph/msgraph-sdk-go v1.1.0
+	github.com/microsoftgraph/msgraph-sdk-go v1.2.0
 	github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
 	github.com/pkg/errors v0.9.1
 	github.com/rudderlabs/analytics-go v3.3.3+incompatible
@@ -27,7 +27,7 @@ require (
 	github.com/spf13/cobra v1.7.0
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.15.0
-	github.com/stretchr/testify v1.8.2
+	github.com/stretchr/testify v1.8.3
 	github.com/tidwall/pretty v1.2.1
 	github.com/tomlazar/table v0.1.2
 	github.com/vbauerster/mpb/v8 v8.1.6
@@ -117,7 +117,7 @@ require (
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.8.0 // indirect
 	golang.org/x/mod v0.10.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
+	golang.org/x/net v0.10.0
 	golang.org/x/sync v0.2.0 // indirect
 	golang.org/x/sys v0.8.0 // indirect
 	golang.org/x/text v0.9.0 // indirect
12  src/go.sum
@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.264 h1:5klL62ebn6uv3oJ0ixF7K12hKItj8lV3QqWeQPlkFSs=
-github.com/aws/aws-sdk-go v1.44.264/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.266 h1:MWd775dcYf7NrwgcHLtlsIbWoWkX8p4vomfNHr88zH0=
+github.com/aws/aws-sdk-go v1.44.266/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
 github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -285,8 +285,8 @@ github.com/microsoft/kiota-serialization-json-go v1.0.0 h1:snT+SwS/R4CMjkmj7mjCH
 github.com/microsoft/kiota-serialization-json-go v1.0.0/go.mod h1:psfgIfqWm/9P1JAdl2cxHHIg9SdEtYHOetfDLIQ5/dw=
 github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
 github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
-github.com/microsoftgraph/msgraph-sdk-go v1.1.0 h1:NtFsFVIt8lpXcTlRbLG1WuCOTzltzS5j+U8Fecqdnr4=
-github.com/microsoftgraph/msgraph-sdk-go v1.1.0/go.mod h1:NIk9kSn7lQ5Hnhhn3FM4NrJWz54JfDHD0JvhJZky27g=
+github.com/microsoftgraph/msgraph-sdk-go v1.2.0 h1:SZGcs6aoxUt5mSSNlgKe7j4N2BhKN1w2DzYBEMLtMCc=
+github.com/microsoftgraph/msgraph-sdk-go v1.2.0/go.mod h1:NIk9kSn7lQ5Hnhhn3FM4NrJWz54JfDHD0JvhJZky27g=
 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY=
 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@@ -387,8 +387,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
 github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
 github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU=
@@ -1,23 +0,0 @@
-package common
-
-// TODO: can be replaced with slices.Contains()
-func ContainsString(super []string, sub string) bool {
-	for _, s := range super {
-		if s == sub {
-			return true
-		}
-	}
-
-	return false
-}
-
-// First returns the first non-zero valued string
-func First(vs ...string) string {
-	for _, v := range vs {
-		if len(v) > 0 {
-			return v
-		}
-	}
-
-	return ""
-}
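The deleted ContainsString carried a TODO pointing at slices.Contains; a minimal sketch of that drop-in replacement (the import path is golang.org/x/exp/slices before Go 1.21 and the standard "slices" package from Go 1.21 on):

    package main

    import (
    	"fmt"

    	"golang.org/x/exp/slices" // the standard "slices" package as of Go 1.21
    )

    func main() {
    	// Equivalent to the removed common.ContainsString(super, sub).
    	fmt.Println(slices.Contains([]string{"foo", "bar"}, "bar")) // true
    	fmt.Println(slices.Contains([]string{"foo", "bar"}, "baz")) // false
    }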
@@ -1,30 +0,0 @@
-package common_test
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/suite"
-
-	"github.com/alcionai/corso/src/internal/common"
-	"github.com/alcionai/corso/src/internal/tester"
-)
-
-type CommonSlicesSuite struct {
-	tester.Suite
-}
-
-func TestCommonSlicesSuite(t *testing.T) {
-	s := &CommonSlicesSuite{Suite: tester.NewUnitSuite(t)}
-	suite.Run(t, s)
-}
-
-func (suite *CommonSlicesSuite) TestContainsString() {
-	t := suite.T()
-	target := "fnords"
-	good := []string{"fnords"}
-	bad := []string{"foo", "bar"}
-
-	assert.True(t, common.ContainsString(good, target))
-	assert.False(t, common.ContainsString(bad, target))
-}
58  src/internal/common/str/str.go  Normal file
@@ -0,0 +1,58 @@
+package str
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/alcionai/clues"
+
+	"github.com/alcionai/corso/src/internal/common/ptr"
+)
+
+// parseBool returns the bool value represented by the string
+// or false on error
+func ParseBool(v string) bool {
+	s, err := strconv.ParseBool(v)
+	if err != nil {
+		return false
+	}
+
+	return s
+}
+
+func FromMapToAny(k string, m map[string]any) (string, error) {
+	if len(m) == 0 {
+		return "", clues.New("missing entry").With("map_key", k)
+	}
+
+	return FromAny(m[k])
+}
+
+func FromAny(a any) (string, error) {
+	if a == nil {
+		return "", clues.New("missing value")
+	}
+
+	sp, ok := a.(*string)
+	if ok {
+		return ptr.Val(sp), nil
+	}
+
+	s, ok := a.(string)
+	if ok {
+		return s, nil
+	}
+
+	return "", clues.New(fmt.Sprintf("unexpected type: %T", a))
+}
+
+// First returns the first non-zero valued string
+func First(vs ...string) string {
+	for _, v := range vs {
+		if len(v) > 0 {
+			return v
+		}
+	}
+
+	return ""
+}
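A short usage sketch of the new str helpers, assuming only the behavior shown in the file above (the literal values are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/alcionai/corso/src/internal/common/str"
    )

    func main() {
    	s := "hello"

    	// FromAny unwraps both string and *string values.
    	v1, _ := str.FromAny(s)
    	v2, _ := str.FromAny(&s)
    	fmt.Println(v1, v2) // hello hello

    	// Any other dynamic type comes back as an error rather than a panic.
    	_, err := str.FromAny(42)
    	fmt.Println(err) // unexpected type: int
    }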
@@ -1,14 +0,0 @@
-package common
-
-import "strconv"
-
-// parseBool returns the bool value represented by the string
-// or false on error
-func ParseBool(v string) bool {
-	s, err := strconv.ParseBool(v)
-	if err != nil {
-		return false
-	}
-
-	return s
-}
25  src/internal/common/tform/tform.go  Normal file
@@ -0,0 +1,25 @@
+package tform
+
+import (
+	"fmt"
+
+	"github.com/alcionai/clues"
+)
+
+func FromMapToAny[T any](k string, m map[string]any) (T, error) {
+	v, ok := m[k]
+	if !ok {
+		return *new(T), clues.New("entry not found")
+	}
+
+	if v == nil {
+		return *new(T), clues.New("nil entry")
+	}
+
+	vt, ok := v.(T)
+	if !ok {
+		return *new(T), clues.New(fmt.Sprintf("unexpected type: %T", v))
+	}
+
+	return vt, nil
+}
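A usage sketch of the generic tform.FromMapToAny, assuming only the behavior shown above; the map contents are illustrative:

    package main

    import (
    	"fmt"

    	"github.com/alcionai/corso/src/internal/common/tform"
    )

    func main() {
    	m := map[string]any{"retries": 4}

    	// The type parameter drives the assertion, so a mismatched type is
    	// reported as an error instead of causing a panic.
    	n, err := tform.FromMapToAny[int]("retries", m)
    	fmt.Println(n, err) // 4 <nil>

    	_, err = tform.FromMapToAny[string]("retries", m)
    	fmt.Println(err) // unexpected type: int
    }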
@@ -11,7 +11,29 @@ import (
 	"github.com/alcionai/corso/src/pkg/path"
 )

-var _ graph.ContainerResolver = &contactFolderCache{}
+var (
+	_ graph.ContainerResolver = &contactFolderCache{}
+	_ containerRefresher      = &contactRefresher{}
+)
+
+type contactRefresher struct {
+	getter containerGetter
+	userID string
+}
+
+func (r *contactRefresher) refreshContainer(
+	ctx context.Context,
+	id string,
+) (graph.CachedContainer, error) {
+	c, err := r.getter.GetContainerByID(ctx, r.userID, id)
+	if err != nil {
+		return nil, clues.Stack(err)
+	}
+
+	f := graph.NewCacheFolder(c, nil, nil)
+
+	return &f, nil
+}

 type contactFolderCache struct {
 	*containerResolver
@@ -34,7 +56,7 @@ func (cfc *contactFolderCache) populateContactRoot(
 		f,
 		path.Builder{}.Append(ptr.Val(f.GetId())),   // path of IDs
 		path.Builder{}.Append(baseContainerPath...)) // display location
-	if err := cfc.addFolder(temp); err != nil {
+	if err := cfc.addFolder(&temp); err != nil {
 		return clues.Wrap(err, "adding resolver dir").WithClues(ctx)
 	}

@@ -77,7 +99,10 @@ func (cfc *contactFolderCache) init(
 	}

 	if cfc.containerResolver == nil {
-		cfc.containerResolver = newContainerResolver()
+		cfc.containerResolver = newContainerResolver(&contactRefresher{
+			userID: cfc.userID,
+			getter: cfc.getter,
+		})
 	}

 	return cfc.populateContactRoot(ctx, baseNode, baseContainerPath)
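The resolver only sees the small containerRefresher interface, so each service can wire in its own fetcher the way contactRefresher does above. A standalone, self-contained model of that seam; cachedContainer, mapRefresher, and errDeleted are illustrative stand-ins for the package's real types:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    )

    // cachedContainer models the resolver's view of a folder.
    type cachedContainer interface{ ID() string }

    // containerRefresher is the seam: the resolver calls it whenever a cached
    // folder needs to be re-fetched from the backing service.
    type containerRefresher interface {
    	refreshContainer(ctx context.Context, dirID string) (cachedContainer, error)
    }

    type folder string

    func (f folder) ID() string { return string(f) }

    // mapRefresher stands in for the Graph-backed contactRefresher.
    type mapRefresher map[string]folder

    var errDeleted = errors.New("deleted in flight")

    func (m mapRefresher) refreshContainer(_ context.Context, id string) (cachedContainer, error) {
    	f, ok := m[id]
    	if !ok {
    		return nil, errDeleted
    	}

    	return f, nil
    }

    func main() {
    	var r containerRefresher = mapRefresher{"inbox-id": "inbox-id"}

    	c, err := r.refreshContainer(context.Background(), "inbox-id")
    	fmt.Println(c.ID(), err) // inbox-id <nil>

    	_, err = r.refreshContainer(context.Background(), "gone")
    	fmt.Println(err) // deleted in flight
    }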
@@ -8,6 +8,7 @@ import (
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/pkg/fault"
+	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
 )

@@ -26,11 +27,18 @@ type containersEnumerator interface {
 	EnumerateContainers(
 		ctx context.Context,
 		userID, baseDirID string,
-		fn func(graph.CacheFolder) error,
+		fn func(graph.CachedContainer) error,
 		errs *fault.Bus,
 	) error
 }

+type containerRefresher interface {
+	refreshContainer(
+		ctx context.Context,
+		dirID string,
+	) (graph.CachedContainer, error)
+}
+
 // ---------------------------------------------------------------------------
 // controller
 // ---------------------------------------------------------------------------
@@ -40,59 +48,243 @@ type containersEnumerator interface {
 // folders if each folder is only a single character.
 const maxIterations = 300

-func newContainerResolver() *containerResolver {
+func newContainerResolver(refresher containerRefresher) *containerResolver {
 	return &containerResolver{
-		cache: map[string]graph.CachedContainer{},
+		cache:     map[string]graph.CachedContainer{},
+		refresher: refresher,
 	}
 }

 type containerResolver struct {
-	cache map[string]graph.CachedContainer
+	cache     map[string]graph.CachedContainer
+	refresher containerRefresher
 }

 func (cr *containerResolver) IDToPath(
 	ctx context.Context,
 	folderID string,
 ) (*path.Builder, *path.Builder, error) {
-	return cr.idToPath(ctx, folderID, 0)
+	ctx = clues.Add(ctx, "container_id", folderID)
+
+	c, ok := cr.cache[folderID]
+	if !ok {
+		return nil, nil, clues.New("container not cached").WithClues(ctx)
+	}
+
+	p := c.Path()
+	if p == nil {
+		return nil, nil, clues.New("cached container has no path").WithClues(ctx)
+	}
+
+	return p, c.Location(), nil
 }

+// refreshContainer attempts to fetch the container with the given ID from Graph
+// API. Returns a graph.CachedContainer if the container was found. If the
+// container was deleted, returns nil, true, nil to note the container should
+// be removed from the cache.
+func (cr *containerResolver) refreshContainer(
+	ctx context.Context,
+	id string,
+) (graph.CachedContainer, bool, error) {
+	ctx = clues.Add(ctx, "refresh_container_id", id)
+	logger.Ctx(ctx).Debug("refreshing container")
+
+	if cr.refresher == nil {
+		return nil, false, clues.New("nil refresher").WithClues(ctx)
+	}
+
+	c, err := cr.refresher.refreshContainer(ctx, id)
+	if err != nil && graph.IsErrDeletedInFlight(err) {
+		logger.Ctx(ctx).Debug("container deleted")
+		return nil, true, nil
+	} else if err != nil {
+		// This is some other error, just return it.
+		return nil, false, clues.Wrap(err, "refreshing container").WithClues(ctx)
+	}
+
+	return c, false, nil
+}
+
+// recoverContainer attempts to fetch a missing container from Graph API and
+// populate the path for it. It returns
+//   - the ID path for the folder
+//   - the display name path for the folder
+//   - if the folder was deleted
+//   - any error that occurred
+//
+// If the folder is marked as deleted, child folders of this folder should be
+// deleted if they haven't been moved to another folder.
+func (cr *containerResolver) recoverContainer(
+	ctx context.Context,
+	folderID string,
+	depth int,
+) (*path.Builder, *path.Builder, bool, error) {
+	c, deleted, err := cr.refreshContainer(ctx, folderID)
+	if err != nil {
+		return nil, nil, false, clues.Wrap(err, "fetching uncached container")
+	}
+
+	if deleted {
+		logger.Ctx(ctx).Debug("fetching uncached container showed it was deleted")
+		return nil, nil, deleted, err
+	}
+
+	if err := cr.addFolder(c); err != nil {
+		return nil, nil, false, clues.Wrap(err, "adding new container").WithClues(ctx)
+	}
+
+	// Retry populating this container's paths.
+	//
+	// TODO(ashmrtn): May want to bump the depth here just so we don't get stuck
+	// retrying too much if for some reason things keep moving around?
+	resolved, err := cr.idToPath(ctx, folderID, depth)
+	if err != nil {
+		err = clues.Wrap(err, "repopulating uncached container")
+	}
+
+	return resolved.idPath, resolved.locPath, resolved.deleted, err
+}
+
+type resolvedPath struct {
+	idPath  *path.Builder
+	locPath *path.Builder
+	cached  bool
+	deleted bool
+}
+
 func (cr *containerResolver) idToPath(
 	ctx context.Context,
 	folderID string,
 	depth int,
-) (*path.Builder, *path.Builder, error) {
+) (resolvedPath, error) {
 	ctx = clues.Add(ctx, "container_id", folderID)

 	if depth >= maxIterations {
-		return nil, nil, clues.New("path contains cycle or is too tall").WithClues(ctx)
+		return resolvedPath{
+			idPath:  nil,
+			locPath: nil,
+			cached:  false,
+			deleted: false,
+		}, clues.New("path contains cycle or is too tall").WithClues(ctx)
 	}

 	c, ok := cr.cache[folderID]
 	if !ok {
-		return nil, nil, clues.New("folder not cached").WithClues(ctx)
+		pth, loc, deleted, err := cr.recoverContainer(ctx, folderID, depth)
+		if err != nil {
+			err = clues.Stack(err)
+		}
+
+		return resolvedPath{
+			idPath:  pth,
+			locPath: loc,
+			cached:  false,
+			deleted: deleted,
+		}, err
 	}

 	p := c.Path()
 	if p != nil {
-		return p, c.Location(), nil
+		return resolvedPath{
+			idPath:  p,
+			locPath: c.Location(),
+			cached:  true,
+			deleted: false,
+		}, nil
 	}

-	parentPath, parentLoc, err := cr.idToPath(
+	resolved, err := cr.idToPath(
 		ctx,
 		ptr.Val(c.GetParentFolderId()),
 		depth+1)
 	if err != nil {
-		return nil, nil, clues.Wrap(err, "retrieving parent folder")
+		return resolvedPath{
+			idPath:  nil,
+			locPath: nil,
+			cached:  true,
+			deleted: false,
+		}, clues.Wrap(err, "retrieving parent container")
 	}

-	fullPath := parentPath.Append(ptr.Val(c.GetId()))
+	if !resolved.cached {
+		logger.Ctx(ctx).Debug("parent container was refreshed")
+
+		newContainer, shouldDelete, err := cr.refreshContainer(ctx, folderID)
+		if err != nil {
+			return resolvedPath{
+				idPath:  nil,
+				locPath: nil,
+				cached:  true,
+				deleted: false,
+			}, clues.Wrap(err, "refreshing container").WithClues(ctx)
+		}
+
+		if shouldDelete {
+			logger.Ctx(ctx).Debug("refreshing container showed it was deleted")
+			delete(cr.cache, folderID)
+
+			return resolvedPath{
+				idPath:  nil,
+				locPath: nil,
+				cached:  true,
+				deleted: true,
+			}, nil
+		}
+
+		// See if the newer version of the current container we got back has
+		// changed. If it has then it could be that the container was moved prior to
+		// deleting the parent and we just hit some eventual consistency case in
+		// Graph.
+		//
+		// TODO(ashmrtn): May want to bump the depth here just so we don't get stuck
+		// retrying too much if for some reason things keep moving around?
+		if ptr.Val(newContainer.GetParentFolderId()) != ptr.Val(c.GetParentFolderId()) ||
+			ptr.Val(newContainer.GetDisplayName()) != ptr.Val(c.GetDisplayName()) {
+			delete(cr.cache, folderID)
+
+			if err := cr.addFolder(newContainer); err != nil {
+				return resolvedPath{
+					idPath:  nil,
+					locPath: nil,
+					cached:  false,
+					deleted: false,
+				}, clues.Wrap(err, "updating cached container").WithClues(ctx)
+			}
+
+			return cr.idToPath(ctx, folderID, depth)
+		}
+	}
+
+	// If the parent wasn't found and refreshing the current container produced no
+	// diffs then delete the current container on the assumption that the parent
+	// was deleted and the current container will later get deleted via eventual
+	// consistency. If w're wrong then the container will get picked up again on
+	// the next backup.
+	if resolved.deleted {
+		logger.Ctx(ctx).Debug("deleting container since parent was deleted")
+		delete(cr.cache, folderID)
+
+		return resolvedPath{
+			idPath:  nil,
+			locPath: nil,
+			cached:  true,
+			deleted: true,
+		}, nil
+	}
+
+	fullPath := resolved.idPath.Append(ptr.Val(c.GetId()))
 	c.SetPath(fullPath)

-	locPath := parentLoc.Append(ptr.Val(c.GetDisplayName()))
+	locPath := resolved.locPath.Append(ptr.Val(c.GetDisplayName()))
 	c.SetLocation(locPath)

-	return fullPath, locPath, nil
+	return resolvedPath{
+		idPath:  fullPath,
+		locPath: locPath,
+		cached:  true,
+		deleted: false,
+	}, nil
 }

 // PathInCache is a utility function to return m365ID of a folder if the
@@ -139,14 +331,14 @@ func (cr *containerResolver) LocationInCache(pathString string) (string, bool) {

 // addFolder adds a folder to the cache with the given ID. If the item is
 // already in the cache does nothing. The path for the item is not modified.
-func (cr *containerResolver) addFolder(cf graph.CacheFolder) error {
+func (cr *containerResolver) addFolder(cf graph.CachedContainer) error {
 	// Only require a non-nil non-empty parent if the path isn't already populated.
 	if cf.Path() != nil {
-		if err := checkIDAndName(cf.Container); err != nil {
+		if err := checkIDAndName(cf); err != nil {
 			return clues.Wrap(err, "adding item to cache")
 		}
 	} else {
-		if err := checkRequiredValues(cf.Container); err != nil {
+		if err := checkRequiredValues(cf); err != nil {
 			return clues.Wrap(err, "adding item to cache")
 		}
 	}
@@ -155,7 +347,7 @@ func (cr *containerResolver) addFolder(cf graph.CacheFolder) error {
 		return nil
 	}

-	cr.cache[ptr.Val(cf.GetId())] = &cf
+	cr.cache[ptr.Val(cf.GetId())] = cf

 	return nil
 }
@@ -176,7 +368,7 @@ func (cr *containerResolver) AddToCache(
 	ctx context.Context,
 	f graph.Container,
 ) error {
-	temp := graph.CacheFolder{
+	temp := &graph.CacheFolder{
 		Container: f,
 	}
 	if err := cr.addFolder(temp); err != nil {
@@ -185,7 +377,7 @@ func (cr *containerResolver) AddToCache(

 	// Populate the path for this entry so calls to PathInCache succeed no matter
 	// when they're made.
-	_, _, err := cr.IDToPath(ctx, ptr.Val(f.GetId()))
+	_, err := cr.idToPath(ctx, ptr.Val(f.GetId()), 0)
 	if err != nil {
 		return clues.Wrap(err, "adding cache entry")
 	}
@@ -208,7 +400,7 @@ func (cr *containerResolver) populatePaths(
 		return el.Failure()
 	}

-	_, _, err := cr.IDToPath(ctx, ptr.Val(f.GetId()))
+	_, err := cr.idToPath(ctx, ptr.Val(f.GetId()), 0)
 	if err != nil {
 		err = clues.Wrap(err, "populating path")
 		el.AddRecoverable(err)
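A standalone model of the refresh-or-prune decision idToPath now makes when a cached entry turns out to be stale; refresh and the maps below are illustrative stand-ins for cr.refreshContainer and the resolver cache:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errDeletedInFlight = errors.New("deleted in flight")

    // refresh stands in for cr.refreshContainer: re-fetch the container, or
    // report that it was deleted server-side.
    func refresh(live map[string]string, id string) (string, error) {
    	c, ok := live[id]
    	if !ok {
    		// Mirrors the graph.IsErrDeletedInFlight branch in the diff.
    		return "", errDeletedInFlight
    	}

    	return c, nil
    }

    func main() {
    	cache := map[string]string{"a": "Archive", "b": "Backlog"}
    	live := map[string]string{"a": "Archive"} // "b" was deleted server-side

    	for _, id := range []string{"a", "b"} {
    		c, err := refresh(live, id)
    		switch {
    		case errors.Is(err, errDeletedInFlight):
    			// Prune, as idToPath does when a refresh shows deletion.
    			delete(cache, id)
    			fmt.Println(id, "pruned")
    		case err != nil:
    			fmt.Println(id, "error:", err)
    		default:
    			cache[id] = c
    			fmt.Println(id, "refreshed:", c)
    		}
    	}
    }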
@ -1,6 +1,7 @@
|
||||
package exchange
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
stdpath "path"
|
||||
"testing"
|
||||
@ -232,8 +233,8 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() {
|
||||
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
fc := newContainerResolver()
|
||||
err := fc.addFolder(test.cf)
|
||||
fc := newContainerResolver(nil)
|
||||
err := fc.addFolder(&test.cf)
|
||||
test.check(suite.T(), err, clues.ToCore(err))
|
||||
})
|
||||
}
|
||||
@ -293,7 +294,7 @@ func resolverWithContainers(numContainers int, useIDInPath bool) (*containerReso
|
||||
containers[i].expectedLocation = stdpath.Join(containers[i-1].expectedLocation, dn)
|
||||
}
|
||||
|
||||
resolver := newContainerResolver()
|
||||
resolver := newContainerResolver(nil)
|
||||
|
||||
for _, c := range containers {
|
||||
resolver.cache[c.id] = c
|
||||
@ -302,6 +303,37 @@ func resolverWithContainers(numContainers int, useIDInPath bool) (*containerReso
|
||||
return resolver, containers
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// mock container refresher
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type refreshResult struct {
|
||||
err error
|
||||
c graph.CachedContainer
|
||||
}
|
||||
|
||||
type mockContainerRefresher struct {
|
||||
// Folder ID -> result
|
||||
entries map[string]refreshResult
|
||||
}
|
||||
|
||||
func (r mockContainerRefresher) refreshContainer(
|
||||
ctx context.Context,
|
||||
id string,
|
||||
) (graph.CachedContainer, error) {
|
||||
rr, ok := r.entries[id]
|
||||
if !ok {
|
||||
// May not be this precise error, but it's easy to get a handle on.
|
||||
return nil, graph.ErrDeletedInFlight
|
||||
}
|
||||
|
||||
if rr.err != nil {
|
||||
return nil, rr.err
|
||||
}
|
||||
|
||||
return rr.c, nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// configured unit suite
|
||||
// ---------------------------------------------------------------------------
|
||||
@ -326,6 +358,160 @@ func TestConfiguredFolderCacheUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &ConfiguredFolderCacheUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *ConfiguredFolderCacheUnitSuite) TestRefreshContainer_RefreshParent() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
|
||||
resolver, containers := resolverWithContainers(4, true)
|
||||
almostLast := containers[len(containers)-2]
|
||||
last := containers[len(containers)-1]
|
||||
|
||||
refresher := mockContainerRefresher{
|
||||
entries: map[string]refreshResult{
|
||||
almostLast.id: {c: almostLast},
|
||||
last.id: {c: last},
|
||||
},
|
||||
}
|
||||
|
||||
resolver.refresher = refresher
|
||||
|
||||
delete(resolver.cache, almostLast.id)
|
||||
|
||||
ferrs := fault.New(true)
|
||||
err := resolver.populatePaths(ctx, ferrs)
|
||||
require.NoError(t, err, "populating paths", clues.ToCore(err))
|
||||
|
||||
p, l, err := resolver.IDToPath(ctx, last.id)
|
||||
require.NoError(t, err, "getting paths", clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, last.expectedPath, p.String())
|
||||
assert.Equal(t, last.expectedLocation, l.String())
|
||||
}
|
||||
|
||||
func (suite *ConfiguredFolderCacheUnitSuite) TestRefreshContainer_RefreshParent_NotFoundDeletes() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
|
||||
resolver, containers := resolverWithContainers(4, true)
|
||||
almostLast := containers[len(containers)-2]
|
||||
last := containers[len(containers)-1]
|
||||
|
||||
refresher := mockContainerRefresher{
|
||||
entries: map[string]refreshResult{
|
||||
last.id: {c: last},
|
||||
},
|
||||
}
|
||||
|
||||
resolver.refresher = refresher
|
||||
|
||||
delete(resolver.cache, almostLast.id)
|
||||
|
||||
ferrs := fault.New(true)
|
||||
err := resolver.populatePaths(ctx, ferrs)
|
||||
require.NoError(t, err, "populating paths", clues.ToCore(err))
|
||||
|
||||
_, _, err = resolver.IDToPath(ctx, last.id)
|
||||
assert.Error(t, err, "getting paths", clues.ToCore(err))
|
||||
}
|
||||
|
||||
func (suite *ConfiguredFolderCacheUnitSuite) TestRefreshContainer_RefreshAncestor_NotFoundDeletes() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
|
||||
resolver, containers := resolverWithContainers(4, true)
|
||||
gone := containers[0]
|
||||
child := containers[1]
|
||||
last := containers[len(containers)-1]
|
||||
|
||||
refresher := mockContainerRefresher{
|
||||
entries: map[string]refreshResult{
|
||||
child.id: {c: child},
|
||||
},
|
||||
}
|
||||
|
||||
resolver.refresher = refresher
|
||||
|
||||
delete(resolver.cache, gone.id)
|
||||
|
||||
ferrs := fault.New(true)
|
||||
err := resolver.populatePaths(ctx, ferrs)
|
||||
require.NoError(t, err, "populating paths", clues.ToCore(err))
|
||||
|
||||
_, _, err = resolver.IDToPath(ctx, last.id)
|
||||
assert.Error(t, err, "getting paths", clues.ToCore(err))
|
||||
}
|
||||
|
||||
func (suite *ConfiguredFolderCacheUnitSuite) TestRefreshContainer_RefreshAncestor_NewParent() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
|
||||
resolver, containers := resolverWithContainers(4, true)
|
||||
other := containers[len(containers)-3]
|
||||
gone := containers[len(containers)-2]
|
||||
last := containers[len(containers)-1]
|
||||
|
||||
expected := *last
|
||||
expected.parentID = other.id
|
||||
expected.expectedPath = stdpath.Join(other.expectedPath, expected.id)
|
||||
expected.expectedLocation = stdpath.Join(other.expectedLocation, expected.displayName)
|
||||
|
||||
refresher := mockContainerRefresher{
|
||||
entries: map[string]refreshResult{
|
||||
last.id: {c: &expected},
|
||||
},
|
||||
}
|
||||
|
||||
resolver.refresher = refresher
|
||||
|
||||
delete(resolver.cache, gone.id)
|
||||
|
||||
ferrs := fault.New(true)
|
||||
err := resolver.populatePaths(ctx, ferrs)
|
||||
require.NoError(t, err, "populating paths", clues.ToCore(err))
|
||||
|
||||
p, l, err := resolver.IDToPath(ctx, last.id)
|
||||
require.NoError(t, err, "getting paths", clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, expected.expectedPath, p.String())
|
||||
assert.Equal(t, expected.expectedLocation, l.String())
|
||||
}

func (suite *ConfiguredFolderCacheUnitSuite) TestRefreshContainer_RefreshFolder_FolderDeleted() {
    ctx, flush := tester.NewContext()
    defer flush()

    t := suite.T()

    resolver, containers := resolverWithContainers(4, true)
    parent := containers[len(containers)-2]
    last := containers[len(containers)-1]

    refresher := mockContainerRefresher{
        entries: map[string]refreshResult{
            parent.id: {c: parent},
        },
    }

    resolver.refresher = refresher

    delete(resolver.cache, parent.id)

    ferrs := fault.New(true)
    err := resolver.populatePaths(ctx, ferrs)
    require.NoError(t, err, "populating paths", clues.ToCore(err))

    _, _, err = resolver.IDToPath(ctx, last.id)
    assert.Error(t, err, "getting paths", clues.ToCore(err))
}

func (suite *ConfiguredFolderCacheUnitSuite) TestDepthLimit() {
    ctx, flush := tester.NewContext()
    defer flush()
@ -350,7 +536,7 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestDepthLimit() {
    for _, test := range table {
        suite.Run(test.name, func() {
            resolver, containers := resolverWithContainers(test.numContainers, false)
            _, _, err := resolver.IDToPath(ctx, containers[len(containers)-1].id)
            _, err := resolver.idToPath(ctx, containers[len(containers)-1].id, 0)
            test.check(suite.T(), err, clues.ToCore(err))
        })
    }
@ -384,6 +570,9 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderNoPathsCached
    ctx, flush := tester.NewContext()
    defer flush()

    err := suite.fc.populatePaths(ctx, fault.New(true))
    require.NoError(suite.T(), err, clues.ToCore(err))

    for _, c := range suite.allContainers {
        suite.Run(ptr.Val(c.GetDisplayName()), func() {
            t := suite.T()
@ -396,10 +585,14 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderNoPathsCached
    }
}

// TODO(ashmrtn): Remove this since the same cache can do IDs or locations.
func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderNoPathsCached_useID() {
    ctx, flush := tester.NewContext()
    defer flush()

    err := suite.fcWithID.populatePaths(ctx, fault.New(true))
    require.NoError(suite.T(), err, clues.ToCore(err))

    for _, c := range suite.containersWithID {
        suite.Run(ptr.Val(c.GetDisplayName()), func() {
            t := suite.T()
@ -419,6 +612,9 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderCachesPaths()
    t := suite.T()
    c := suite.allContainers[len(suite.allContainers)-1]

    err := suite.fc.populatePaths(ctx, fault.New(true))
    require.NoError(t, err, clues.ToCore(err))

    p, l, err := suite.fc.IDToPath(ctx, c.id)
    require.NoError(t, err, clues.ToCore(err))
    assert.Equal(t, c.expectedPath, p.String())
@ -432,6 +628,7 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderCachesPaths()
    assert.Equal(t, c.expectedLocation, l.String())
}

// TODO(ashmrtn): Remove this since the same cache can do IDs or locations.
func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderCachesPaths_useID() {
    ctx, flush := tester.NewContext()
    defer flush()
@ -439,6 +636,9 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderCachesPaths_u
    t := suite.T()
    c := suite.containersWithID[len(suite.containersWithID)-1]

    err := suite.fcWithID.populatePaths(ctx, fault.New(true))
    require.NoError(t, err, clues.ToCore(err))

    p, l, err := suite.fcWithID.IDToPath(ctx, c.id)
    require.NoError(t, err, clues.ToCore(err))
    assert.Equal(t, c.expectedPath, p.String())
@ -457,12 +657,21 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolderErrorsParentN
    defer flush()

    t := suite.T()
    last := suite.allContainers[len(suite.allContainers)-1]
    almostLast := suite.allContainers[len(suite.allContainers)-2]

    delete(suite.fc.cache, almostLast.id)

    _, _, err := suite.fc.IDToPath(ctx, last.id)
    err := suite.fc.populatePaths(ctx, fault.New(true))
    assert.Error(t, err, clues.ToCore(err))
}

func (suite *ConfiguredFolderCacheUnitSuite) TestLookupCachedFolder_Errors_PathsNotBuilt() {
    ctx, flush := tester.NewContext()
    defer flush()

    t := suite.T()

    _, _, err := suite.fc.IDToPath(ctx, suite.allContainers[len(suite.allContainers)-1].id)
    assert.Error(t, err, clues.ToCore(err))
}

@ -597,7 +597,7 @@ func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression(
        bdayID string
    )

    fn := func(gcf graph.CacheFolder) error {
    fn := func(gcf graph.CachedContainer) error {
        if ptr.Val(gcf.GetDisplayName()) == DefaultCalendar {
            calID = ptr.Val(gcf.GetId())
        }

@ -27,7 +27,7 @@ func (ecc *eventCalendarCache) init(
    ctx context.Context,
) error {
    if ecc.containerResolver == nil {
        ecc.containerResolver = newContainerResolver()
        ecc.containerResolver = newContainerResolver(nil)
    }

    return ecc.populateEventRoot(ctx)
@ -49,7 +49,7 @@ func (ecc *eventCalendarCache) populateEventRoot(ctx context.Context) error {
        f,
        path.Builder{}.Append(ptr.Val(f.GetId())), // storage path
        path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location
    if err := ecc.addFolder(temp); err != nil {
    if err := ecc.addFolder(&temp); err != nil {
        return clues.Wrap(err, "initializing calendar resolver").WithClues(ctx)
    }

@ -98,7 +98,7 @@ func (ecc *eventCalendarCache) AddToCache(ctx context.Context, f graph.Container
        path.Builder{}.Append(ptr.Val(f.GetId())), // storage path
        path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location

    if err := ecc.addFolder(temp); err != nil {
    if err := ecc.addFolder(&temp); err != nil {
        return clues.Wrap(err, "adding container").WithClues(ctx)
    }

@ -10,7 +10,29 @@ import (
    "github.com/alcionai/corso/src/pkg/path"
)

var _ graph.ContainerResolver = &mailFolderCache{}
var (
    _ graph.ContainerResolver = &mailFolderCache{}
    _ containerRefresher      = &mailRefresher{}
)

type mailRefresher struct {
    getter containerGetter
    userID string
}

func (r *mailRefresher) refreshContainer(
    ctx context.Context,
    id string,
) (graph.CachedContainer, error) {
    c, err := r.getter.GetContainerByID(ctx, r.userID, id)
    if err != nil {
        return nil, clues.Stack(err)
    }

    f := graph.NewCacheFolder(c, nil, nil)

    return &f, nil
}
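
For orientation, a minimal sketch (not part of this diff) of how a resolver might use a containerRefresher to repair a cache miss. The repair function and cache shape are assumptions; only containerRefresher and graph.CachedContainer come from this change.

// hypothetical illustration: re-fetch a container that fell out of the cache.
func repairMissingContainer(
    ctx context.Context,
    r containerRefresher,
    cache map[string]graph.CachedContainer,
    id string,
) error {
    if _, ok := cache[id]; ok {
        // nothing to repair
        return nil
    }

    c, err := r.refreshContainer(ctx, id)
    if err != nil {
        // per the tests above, an unrecoverable fetch means the
        // container (and its subtree) is treated as deleted.
        return clues.Wrap(err, "refreshing missing container")
    }

    cache[id] = c

    return nil
}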

// mailFolderCache struct used to improve lookup of directories within exchange.Mail
// cache map of cachedContainers where the key = M365ID
@ -29,7 +51,10 @@ func (mc *mailFolderCache) init(
    ctx context.Context,
) error {
    if mc.containerResolver == nil {
        mc.containerResolver = newContainerResolver()
        mc.containerResolver = newContainerResolver(&mailRefresher{
            userID: mc.userID,
            getter: mc.getter,
        })
    }

    return mc.populateMailRoot(ctx)
@ -52,7 +77,7 @@ func (mc *mailFolderCache) populateMailRoot(ctx context.Context) error {
        // the user doesn't see in the regular UI for Exchange.
        path.Builder{}.Append(), // path of IDs
        path.Builder{}.Append()) // display location
    if err := mc.addFolder(temp); err != nil {
    if err := mc.addFolder(&temp); err != nil {
        return clues.Wrap(err, "adding resolver dir").WithClues(ctx)
    }

@ -169,21 +169,22 @@ func CreateCalendarDisplayable(entry any, parentID string) *CalendarDisplayable
// helper funcs
// =========================================

// checkRequiredValues is a helper function to ensure that
// all the pointers are set prior to being called.
func CheckRequiredValues(c Container) error {
    id, ok := ptr.ValOK(c.GetId())
    if !ok {
// CheckIDAndName is a validator that ensures the ID
// and name are populated and not zero valued.
func CheckIDAndName(c Container) error {
    if c == nil {
        return clues.New("nil container")
    }

    id := ptr.Val(c.GetId())
    if len(id) == 0 {
        return clues.New("container missing ID")
    }

    if _, ok := ptr.ValOK(c.GetDisplayName()); !ok {
    dn := ptr.Val(c.GetDisplayName())
    if len(dn) == 0 {
        return clues.New("container missing display name").With("container_id", id)
    }

    if _, ok := ptr.ValOK(c.GetParentFolderId()); !ok {
        return clues.New("container missing parent ID").With("container_id", id)
    }

    return nil
}

@ -3,7 +3,9 @@ package graph
import (
    "context"
    "net/http"
    "strconv"
    "sync"
    "time"

    "github.com/alcionai/clues"
    khttp "github.com/microsoft/kiota-http-go"
@ -200,3 +202,144 @@ func (mw *RateLimiterMiddleware) Intercept(
    QueueRequest(req.Context())
    return pipeline.Next(req, middlewareIndex)
}

// ---------------------------------------------------------------------------
// global throttle fencing
// ---------------------------------------------------------------------------

// timedFence sets up a fence for a certain amount of time.
// The time can be extended arbitrarily. All processes blocked at
// the fence will be let through when all timer extensions conclude.
type timedFence struct {
    mu     sync.Mutex
    c      chan struct{}
    timers map[int64]*time.Timer
}

func newTimedFence() *timedFence {
    return &timedFence{
        mu:     sync.Mutex{},
        c:      nil,
        timers: map[int64]*time.Timer{},
    }
}

// Block waits until the fence is let down. If no fence is up, it
// returns immediately. It also returns, with an error, if the ctx
// deadline expires before the fence is let down.
func (tf *timedFence) Block(ctx context.Context) error {
    // set to a local var to avoid race panics from tf.c
    // getting set to nil between the conditional check and
    // the read case. If c gets closed between those two
    // points then the select case will exit immediately,
    // as if we didn't block at all.
    c := tf.c

    if c != nil {
        select {
        case <-ctx.Done():
            return clues.Wrap(ctx.Err(), "blocked on throttling fence")
        case <-c:
        }
    }

    return nil
}

// RaiseFence puts up a fence to block requests for the provided
// duration of time. Seconds are always added to the current time.
// Multiple calls to RaiseFence are not additive. ie: calling
// `RaiseFence(5); RaiseFence(1)` will keep the fence up until
// now+5 seconds, not now+6 seconds. When the last remaining fence
// is dropped, all currently blocked calls are allowed through.
func (tf *timedFence) RaiseFence(seconds time.Duration) {
    tf.mu.Lock()
    defer tf.mu.Unlock()

    if seconds < 1 {
        return
    }

    if tf.c == nil {
        tf.c = make(chan struct{})
    }

    timer := time.NewTimer(seconds)
    tid := time.Now().Add(seconds).UnixMilli()
    tf.timers[tid] = timer

    go func(c <-chan time.Time, id int64) {
        // wait for the timeout
        <-c

        tf.mu.Lock()
        defer tf.mu.Unlock()

        // remove the timer
        delete(tf.timers, id)

        // if no timers remain, close the channel to drop the fence
        // and set the fence channel to nil
        if len(tf.timers) == 0 && tf.c != nil {
            close(tf.c)
            tf.c = nil
        }
    }(timer.C, tid)
}
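
As a usage sketch (illustrative only, not from this diff): callers block at the fence before issuing a request, and raise the fence when the server signals a throttling window. The fencedCall helper and the 5-second duration are assumptions.

// hypothetical call pattern for timedFence, inferred from the
// Block and RaiseFence doc comments above.
func fencedCall(ctx context.Context, tf *timedFence, do func() error) error {
    // wait out any active throttling window first
    if err := tf.Block(ctx); err != nil {
        return err
    }

    if err := do(); err != nil {
        // a real caller would use the server-provided retry-after
        // duration here; 5 seconds is an arbitrary placeholder.
        tf.RaiseFence(5 * time.Second)
        return err
    }

    return nil
}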

// throttlingMiddleware is used to ensure we don't overstep per-min request limits.
type throttlingMiddleware struct {
    tf *timedFence
}

func (mw *throttlingMiddleware) Intercept(
    pipeline khttp.Pipeline,
    middlewareIndex int,
    req *http.Request,
) (*http.Response, error) {
    err := mw.tf.Block(req.Context())
    if err != nil {
        return nil, err
    }

    resp, err := pipeline.Next(req, middlewareIndex)
    if resp == nil || err != nil {
        return resp, err
    }

    seconds := getRetryAfterHeader(resp)
    if seconds < 1 {
        return resp, nil
    }

    // if all prior conditions pass, we need to add a fence that blocks
    // calls, globally, from progressing until the timeout retry-after
    // passes.
    mw.tf.RaiseFence(time.Duration(seconds) * time.Second)

    return resp, nil
}

func getRetryAfterHeader(resp *http.Response) int {
    if resp == nil || len(resp.Header) == 0 {
        return -1
    }

    if resp.StatusCode != http.StatusTooManyRequests {
        return -1
    }

    rah := resp.Header.Get(retryAfterHeader)
    if len(rah) == 0 {
        return -1
    }

    seconds, err := strconv.Atoi(rah)
    if err != nil {
        // the error itself is irrelevant, we only want
        // to wait if we have a clear length of time to wait until.
        return -1
    }

    return seconds
}
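
Worth noting: RFC 9110 also allows Retry-After to carry an HTTP-date rather than delta-seconds; getRetryAfterHeader deliberately handles only the integer form. A hedged sketch of what date support could look like, if it were ever wanted (retryAfterSeconds is a hypothetical helper, not in this diff):

// sketch only: delta-seconds plus HTTP-date parsing.
func retryAfterSeconds(rah string, now time.Time) int {
    if s, err := strconv.Atoi(rah); err == nil {
        return s
    }

    if t, err := http.ParseTime(rah); err == nil {
        if d := t.Sub(now); d > 0 {
            return int(d.Seconds())
        }
    }

    return -1
}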

@ -8,23 +8,25 @@ import (
    "testing"
    "time"

    "github.com/alcionai/clues"
    khttp "github.com/microsoft/kiota-http-go"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "golang.org/x/net/context"

    "github.com/alcionai/corso/src/internal/tester"
)

type ConcurrencyLimiterUnitTestSuite struct {
type ConcurrencyMWUnitTestSuite struct {
    tester.Suite
}

func TestConcurrencyLimiterSuite(t *testing.T) {
    suite.Run(t, &ConcurrencyLimiterUnitTestSuite{Suite: tester.NewUnitSuite(t)})
    suite.Run(t, &ConcurrencyMWUnitTestSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ConcurrencyLimiterUnitTestSuite) TestConcurrencyLimiter() {
func (suite *ConcurrencyMWUnitTestSuite) TestConcurrencyLimiter() {
    t := suite.T()

    maxConcurrentRequests := 4
@ -71,7 +73,7 @@ func (suite *ConcurrencyLimiterUnitTestSuite) TestConcurrencyLimiter() {
    wg.Wait()
}

func (suite *ConcurrencyLimiterUnitTestSuite) TestInitializeConcurrencyLimiter() {
func (suite *ConcurrencyMWUnitTestSuite) TestInitializeConcurrencyLimiter() {
    t := suite.T()

    InitializeConcurrencyLimiter(2)
@ -80,7 +82,7 @@ func (suite *ConcurrencyLimiterUnitTestSuite) TestInitializeConcurrencyLimiter()
    assert.Equal(t, cap(concurrencyLim.semaphore), 2, "singleton semaphore capacity changed")
}

func (suite *ConcurrencyLimiterUnitTestSuite) TestGenerateConcurrencyLimiter() {
func (suite *ConcurrencyMWUnitTestSuite) TestGenerateConcurrencyLimiter() {
    tests := []struct {
        name string
        cap  int
@ -118,3 +120,201 @@ func (suite *ConcurrencyLimiterUnitTestSuite) TestGenerateConcurrencyLimiter() {
        })
    }
}

func (suite *ConcurrencyMWUnitTestSuite) TestTimedFence_Block() {
    ctx, flush := tester.NewContext()
    defer flush()

    t := suite.T()
    tf := newTimedFence()

    // raise multiple fences, the longest at 5 seconds
    for i := -5; i < 6; i++ {
        tf.RaiseFence(time.Duration(i) * time.Second)
    }

    // -5..0 get dropped, 1..5 get added
    assert.Len(t, tf.timers, 5)

    start := time.Now()

    var wg sync.WaitGroup

    wg.Add(5)

    for i := 0; i < 5; i++ {
        go func(i int) {
            defer wg.Done()

            err := tf.Block(ctx)
            require.NoError(t, err, clues.ToCore(err))
        }(i)
    }

    wg.Wait()

    // should block for 5 seconds. comparing to 4 to avoid
    // race condition flakes.
    assert.Less(t, 4.0, time.Since(start).Seconds())
}

func (suite *ConcurrencyMWUnitTestSuite) TestTimedFence_Block_ctxDeadline() {
    ctx, flush := tester.NewContext()
    defer flush()

    ctx, _ = context.WithDeadline(ctx, time.Now().Add(2*time.Second))

    t := suite.T()
    tf := newTimedFence()

    // raise multiple fences, the longest at 10 seconds
    for i := 1; i < 6; i++ {
        tf.RaiseFence(time.Duration(i*2) * time.Second)
    }

    start := time.Now()

    var wg sync.WaitGroup

    wg.Add(5)

    for i := 0; i < 5; i++ {
        go func(i int) {
            defer wg.Done()

            err := tf.Block(ctx)
            // should error from ctx deadline
            require.Error(t, err, clues.ToCore(err))
        }(i)
    }

    wg.Wait()

    // should block for 2 seconds. comparing to 3 to avoid
    // race condition flakes.
    assert.Greater(t, 3.0, time.Since(start).Seconds())
}

type mockPipeline struct {
    resp *http.Response
    err  error
}

func (mp mockPipeline) Next(*http.Request, int) (*http.Response, error) {
    return mp.resp, mp.err
}

func (suite *ConcurrencyMWUnitTestSuite) TestThrottlingMiddleware() {
    retryAfterNan := http.Header{}
    retryAfterNan.Set(retryAfterHeader, "brunhuldi")

    retryAfterNeg1 := http.Header{}
    retryAfterNeg1.Set(retryAfterHeader, "-1")

    retryAfter0 := http.Header{}
    retryAfter0.Set(retryAfterHeader, "0")

    retryAfter5 := http.Header{}
    retryAfter5.Set(retryAfterHeader, "5")

    goodPipe := mockPipeline{
        resp: &http.Response{
            StatusCode: http.StatusOK,
            Header:     http.Header{},
        },
    }

    table := []struct {
        name          string
        pipeline      mockPipeline
        expectMinWait float64
    }{
        {
            name:          "2xx response",
            pipeline:      goodPipe,
            expectMinWait: 0,
        },
        {
            name: "non-429 response",
            pipeline: mockPipeline{
                resp: &http.Response{
                    StatusCode: http.StatusBadGateway,
                    Header:     retryAfter5,
                },
            },
            expectMinWait: 0,
        },
        {
            name: "429 response w/out retry header",
            pipeline: mockPipeline{
                resp: &http.Response{
                    StatusCode: http.StatusTooManyRequests,
                    Header:     http.Header{},
                },
            },
            expectMinWait: 0,
        },
        {
            name: "429 response w/ nan retry-after",
            pipeline: mockPipeline{
                resp: &http.Response{
                    StatusCode: http.StatusTooManyRequests,
                    Header:     retryAfterNan,
                },
            },
            expectMinWait: 0,
        },
        {
            name: "429 response w/ negative retry-after",
            pipeline: mockPipeline{
                resp: &http.Response{
                    StatusCode: http.StatusTooManyRequests,
                    Header:     retryAfterNeg1,
                },
            },
            expectMinWait: 0,
        },
        {
            name: "429 response w/ zero retry-after",
            pipeline: mockPipeline{
                resp: &http.Response{
                    StatusCode: http.StatusTooManyRequests,
                    Header:     retryAfter0,
                },
            },
            expectMinWait: 0,
        },
        {
            name: "429 response w/ positive retry-after",
            pipeline: mockPipeline{
                resp: &http.Response{
                    StatusCode: http.StatusTooManyRequests,
                    Header:     retryAfter5,
                },
            },
            expectMinWait: 4,
        },
    }
    for _, test := range table {
        suite.Run(test.name, func() {
            ctx, flush := tester.NewContext()
            defer flush()

            t := suite.T()
            tm := throttlingMiddleware{newTimedFence()}

            req := &http.Request{}
            req = req.WithContext(ctx)

            start := time.Now()

            _, err := tm.Intercept(test.pipeline, 0, req)
            require.NoError(t, err, clues.ToCore(err))

            _, err = tm.Intercept(goodPipe, 0, req)
            require.NoError(t, err, clues.ToCore(err))

            assert.Less(t, test.expectMinWait, time.Since(start).Seconds())
        })
    }
}

@ -147,6 +147,7 @@ func internalMiddleware(cc *clientConfig) []khttp.Middleware {
    },
    khttp.NewRedirectHandler(),
    &LoggingMiddleware{},
    &throttlingMiddleware{newTimedFence()},
    &RateLimiterMiddleware{},
    &MetricsMiddleware{},
}

@ -268,6 +268,7 @@ func kiotaMiddlewares(

    mw = append(
        mw,
        &throttlingMiddleware{newTimedFence()},
        &RateLimiterMiddleware{},
        &MetricsMiddleware{})

1030 src/pkg/backup/details/testdata/testdata.go (vendored)
File diff suppressed because it is too large
@ -14,7 +14,7 @@ import (
    "go.uber.org/zap/zapcore"
    "golang.org/x/exp/slices"

    "github.com/alcionai/corso/src/internal/common"
    "github.com/alcionai/corso/src/internal/common/str"
)

// Default location for writing logs, initialized in platform specific files
@ -256,7 +256,7 @@ func (s Settings) EnsureDefaults() Settings {

    algs := []piiAlg{PIIPlainText, PIIMask, PIIHash}
    if len(set.PIIHandling) == 0 || !slices.Contains(algs, set.PIIHandling) {
        set.PIIHandling = piiAlg(common.First(piiHandling, string(PIIPlainText)))
        set.PIIHandling = piiAlg(str.First(piiHandling, string(PIIPlainText)))
    }

    if len(set.File) == 0 {

@ -1,6 +1,7 @@
package selectors_test

import (
    "fmt"
    "testing"
    "time"

@ -9,9 +10,11 @@ import (

    "github.com/alcionai/corso/src/internal/common/dttm"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/internal/version"
    "github.com/alcionai/corso/src/pkg/backup/details"
    "github.com/alcionai/corso/src/pkg/backup/details/testdata"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/selectors"
)

@ -27,25 +30,31 @@ func (suite *SelectorReduceSuite) TestReduce() {
    ctx, flush := tester.NewContext()
    defer flush()

    allDetails := testdata.GetDetailsSet()
    table := []struct {
        name     string
        selFunc  func() selectors.Reducer
        expected []details.Entry
        selFunc  func(t *testing.T, wantVersion int) selectors.Reducer
        expected func(t *testing.T, wantVersion int) []details.Entry
    }{
        {
            name: "ExchangeAllMail",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.Mails(selectors.Any(), selectors.Any()))

                return sel
            },
            expected: testdata.ExchangeEmailItems,
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    -1)
            },
        },
        {
            name: "ExchangeMailFolderPrefixMatch",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.MailFolders(
                    []string{testdata.ExchangeEmailInboxPath.FolderLocation()},
@ -53,48 +62,79 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: testdata.ExchangeEmailItems,
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    -1)
            },
        },
        {
            name: "ExchangeMailSubject",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Filter(sel.MailSubject("foo"))

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeMailSubjectExcludeItem",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                deets := testdata.GetDeetsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion)

                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Filter(sel.MailSender("a-person"))
                sel.Exclude(sel.Mails(
                    selectors.Any(),
                    []string{testdata.ExchangeEmailItemPath2.RR.ShortRef()},
                    []string{deets[1].ShortRef},
                ))

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeMailSender",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Filter(sel.MailSender("a-person"))

                return sel
            },
            expected: []details.Entry{
                testdata.ExchangeEmailItems[0],
                testdata.ExchangeEmailItems[1],
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0, 1)
            },
        },
        {
            name: "ExchangeMailReceivedTime",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Filter(sel.MailReceivedBefore(
                    dttm.Format(testdata.Time1.Add(time.Second)),
@ -102,11 +142,18 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeMailID",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.Mails(
                    selectors.Any(),
@ -115,24 +162,44 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeMailShortRef",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                deets := testdata.GetDeetsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion)

                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.Mails(
                    selectors.Any(),
                    []string{testdata.ExchangeEmailItemPath1.RR.ShortRef()},
                    []string{deets[0].ShortRef},
                ))

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeAllEventsAndMailWithSubject",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.Events(
                    selectors.Any(),
@ -142,39 +209,62 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeEventsAndMailWithSubject",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Filter(sel.EventSubject("foo"))
                sel.Filter(sel.MailSubject("foo"))

                return sel
            },
            expected: []details.Entry{},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return []details.Entry{}
            },
        },
        {
            name: "ExchangeAll",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.AllData())

                return sel
            },
            expected: append(
                append(
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return append(
                    append(
                        []details.Entry{},
                        testdata.ExchangeEmailItems...),
                        testdata.ExchangeContactsItems...),
                    testdata.ExchangeEventsItems...,
                ),
                        testdata.GetItemsForVersion(
                            t,
                            path.ExchangeService,
                            path.EmailCategory,
                            wantVersion,
                            -1),
                        testdata.GetItemsForVersion(
                            t,
                            path.ExchangeService,
                            path.EventsCategory,
                            wantVersion,
                            -1)...),
                    testdata.GetItemsForVersion(
                        t,
                        path.ExchangeService,
                        path.ContactsCategory,
                        wantVersion,
                        -1)...)
            },
        },
        {
            name: "ExchangeMailByFolder",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.MailFolders(
                    []string{testdata.ExchangeEmailBasePath.FolderLocation()},
@ -182,14 +272,21 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        // TODO (keepers): all folders are treated as prefix-matches at this time.
        // so this test actually does nothing different. In the future, we'll
        // need to amend the non-prefix folder tests to expect non-prefix matches.
        {
            name: "ExchangeMailByFolderPrefix",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.MailFolders(
                    []string{testdata.ExchangeEmailBasePath.FolderLocation()},
@ -198,11 +295,18 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEmailItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeMailByFolderRoot",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.MailFolders(
                    []string{testdata.ExchangeEmailInboxPath.FolderLocation()},
@ -210,11 +314,18 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: testdata.ExchangeEmailItems,
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EmailCategory,
                    wantVersion,
                    -1)
            },
        },
        {
            name: "ExchangeContactByFolder",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.ContactFolders(
                    []string{testdata.ExchangeContactsBasePath.FolderLocation()},
@ -222,11 +333,18 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: []details.Entry{testdata.ExchangeContactsItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.ContactsCategory,
                    wantVersion,
                    0)
            },
        },
        {
            name: "ExchangeContactByFolderRoot",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.ContactFolders(
                    []string{testdata.ExchangeContactsRootPath.FolderLocation()},
@ -234,12 +352,19 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: testdata.ExchangeContactsItems,
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.ContactsCategory,
                    wantVersion,
                    -1)
            },
        },

        {
            name: "ExchangeEventsByFolder",
            selFunc: func() selectors.Reducer {
            selFunc: func(t *testing.T, wantVersion int) selectors.Reducer {
                sel := selectors.NewExchangeRestore(selectors.Any())
                sel.Include(sel.EventCalendars(
                    []string{testdata.ExchangeEventsBasePath.FolderLocation()},
@ -247,16 +372,28 @@ func (suite *SelectorReduceSuite) TestReduce() {

                return sel
            },
            expected: []details.Entry{testdata.ExchangeEventsItems[0]},
            expected: func(t *testing.T, wantVersion int) []details.Entry {
                return testdata.GetItemsForVersion(
                    t,
                    path.ExchangeService,
                    path.EventsCategory,
                    wantVersion,
                    0)
            },
        },
    }

    for _, test := range table {
        suite.Run(test.name, func() {
            t := suite.T()
    for v := 0; v <= version.Backup; v++ {
        suite.Run(fmt.Sprintf("version%d", v), func() {
            for _, test := range table {
                suite.Run(test.name, func() {
                    t := suite.T()

                    output := test.selFunc().Reduce(ctx, allDetails, fault.New(true))
                    assert.ElementsMatch(t, test.expected, output.Entries)
                    allDetails := testdata.GetDetailsSetForVersion(t, v)
                    output := test.selFunc(t, v).Reduce(ctx, allDetails, fault.New(true))
                    assert.ElementsMatch(t, test.expected(t, v), output.Entries)
                })
            }
        })
    }
}

@ -3,29 +3,11 @@ package api
import (
    "strings"

    "github.com/alcionai/clues"
    "github.com/microsoftgraph/msgraph-sdk-go/models"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/graph"
)

// checkIDAndName is a helper function to ensure that
// the ID and name pointers are set prior to being called.
func checkIDAndName(c graph.Container) error {
    id := ptr.Val(c.GetId())
    if len(id) == 0 {
        return clues.New("container missing ID")
    }

    dn := ptr.Val(c.GetDisplayName())
    if len(dn) == 0 {
        return clues.New("container missing display name").With("container_id", id)
    }

    return nil
}

func HasAttachments(body models.ItemBodyable) bool {
    if body == nil {
        return false
@ -1,10 +1,7 @@
package api

import (
    "context"

    "github.com/alcionai/clues"
    "github.com/microsoft/kiota-abstractions-go/serialization"

    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/pkg/account"
@ -78,31 +75,3 @@ func newLargeItemService(creds account.M365Config) (*graph.Service, error) {

    return a, nil
}

// ---------------------------------------------------------------------------
// common types and consts
// ---------------------------------------------------------------------------

// DeltaUpdate holds the results of a current delta token. It normally
// gets produced when aggregating the addition and removal of items in
// a delta-queryable folder.
type DeltaUpdate struct {
    // the deltaLink itself
    URL string
    // true if the old delta was marked as invalid
    Reset bool
}

// GraphQuery represents functions which perform exchange-specific queries
// into M365 backstore. Responses -> returned items will only contain the information
// that is included in the options
// TODO: use selector or path for granularity into specific folders or specific date ranges
type GraphQuery func(ctx context.Context, userID string) (serialization.Parsable, error)

// GraphRetrievalFunctions are functions from the Microsoft Graph API that retrieve
// the default associated data of a M365 object. This varies by object. Additional
// Queries must be run to obtain the omitted fields.
type GraphRetrievalFunc func(
    ctx context.Context,
    user, m365ID string,
) (serialization.Parsable, error)
@ -49,41 +49,6 @@ func (suite *ExchangeServiceSuite) SetupSuite() {
    suite.gs = graph.NewService(adpt)
}

func (suite *ExchangeServiceSuite) TestOptionsForCalendars() {
    tests := []struct {
        name       string
        params     []string
        checkError assert.ErrorAssertionFunc
    }{
        {
            name:       "Empty Literal",
            params:     []string{},
            checkError: assert.NoError,
        },
        {
            name:       "Invalid Parameter",
            params:     []string{"status"},
            checkError: assert.Error,
        },
        {
            name:       "Invalid Parameters",
            params:     []string{"status", "height", "month"},
            checkError: assert.Error,
        },
        {
            name:       "Valid Parameters",
            params:     []string{"changeKey", "events", "owner"},
            checkError: assert.NoError,
        },
    }
    for _, test := range tests {
        suite.Run(test.name, func() {
            _, err := optionsForCalendars(test.params)
            test.checkError(suite.T(), err, clues.ToCore(err))
        })
    }
}

//nolint:lll
var stubHTMLContent = "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><style type=\"text/css\" style=\"display:none\">\r\n<!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n-->\r\n</style></head><body dir=\"ltr\"><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Happy New Year,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">In accordance with TPS report guidelines, there have been questions about how to address our activities SharePoint Cover page. Do you believe this is the best picture? </div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><img class=\"FluidPluginCopy ContentPasted0 w-2070 h-1380\" size=\"5854817\" data-outlook-trace=\"F:1|T:1\" src=\"cid:85f4faa3-9851-40c7-ba0a-e63dce1185f9\" style=\"max-width:100%\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Let me know if this meets our culture requirements.</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Warm Regards,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Dustin</div></body></html>"

3 src/pkg/services/m365/api/consts.go (new file)
@ -0,0 +1,3 @@
package api

const maxPageSize = int32(999)
@ -96,12 +96,13 @@ func (c Contacts) GetContainerByID(
    ctx context.Context,
    userID, dirID string,
) (graph.Container, error) {
    ofcf, err := optionsForContactFolderByID([]string{"displayName", "parentFolderId"})
    if err != nil {
        return nil, graph.Wrap(ctx, err, "setting contact folder options")
    queryParams := &users.ItemContactFoldersContactFolderItemRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemContactFoldersContactFolderItemRequestBuilderGetQueryParameters{
            Select: []string{"id", "displayName", "parentFolderId"},
        },
    }

    resp, err := c.Stable.Client().Users().ByUserId(userID).ContactFolders().ByContactFolderId(dirID).Get(ctx, ofcf)
    resp, err := c.Stable.Client().Users().ByUserId(userID).ContactFolders().ByContactFolderId(dirID).Get(ctx, queryParams)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }
@ -117,7 +118,7 @@ func (c Contacts) GetContainerByID(
func (c Contacts) EnumerateContainers(
    ctx context.Context,
    userID, baseDirID string,
    fn func(graph.CacheFolder) error,
    fn func(graph.CachedContainer) error,
    errs *fault.Bus,
) error {
    service, err := c.Service()
@ -125,11 +126,10 @@ func (c Contacts) EnumerateContainers(
        return graph.Stack(ctx, err)
    }

    fields := []string{"displayName", "parentFolderId"}

    ofcf, err := optionsForContactChildFolders(fields)
    if err != nil {
        return graph.Wrap(ctx, err, "setting contact child folder options")
    queryParams := &users.ItemContactFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemContactFoldersItemChildFoldersRequestBuilderGetQueryParameters{
            Select: []string{"id", "displayName", "parentFolderId"},
        },
    }

    el := errs.Local()
@ -145,7 +145,7 @@ func (c Contacts) EnumerateContainers(
            break
        }

        resp, err := builder.Get(ctx, ofcf)
        resp, err := builder.Get(ctx, queryParams)
        if err != nil {
            return graph.Stack(ctx, err)
        }
@ -155,7 +155,7 @@ func (c Contacts) EnumerateContainers(
            return el.Failure()
        }

        if err := checkIDAndName(fold); err != nil {
        if err := graph.CheckIDAndName(fold); err != nil {
            errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation))
            continue
        }
@ -166,7 +166,7 @@ func (c Contacts) EnumerateContainers(
            "container_display_name", ptr.Val(fold.GetDisplayName()))

        temp := graph.NewCacheFolder(fold, nil, nil)
        if err := fn(temp); err != nil {
        if err := fn(&temp); err != nil {
            errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation))
            continue
        }
@ -200,28 +200,17 @@ func NewContactPager(
    gs graph.Servicer,
    user, directoryID string,
    immutableIDs bool,
) (itemPager, error) {
    selecting, err := buildOptions([]string{"parentFolderId"}, fieldsForContacts)
    if err != nil {
        return nil, err
    }

    requestParameters := &users.ItemContactFoldersItemContactsRequestBuilderGetQueryParameters{
        Select: selecting,
    }

    options := &users.ItemContactFoldersItemContactsRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParameters,
        Headers:         buildPreferHeaders(true, immutableIDs),
    }

    if err != nil {
        return &contactPager{}, err
) itemPager {
    queryParams := &users.ItemContactFoldersItemContactsRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemContactFoldersItemContactsRequestBuilderGetQueryParameters{
            Select: []string{"id", "parentFolderId"},
        },
        Headers: buildPreferHeaders(true, immutableIDs),
    }

    builder := gs.Client().Users().ByUserId(user).ContactFolders().ByContactFolderId(directoryID).Contacts()

    return &contactPager{gs, builder, options}, nil
    return &contactPager{gs, builder, queryParams}
}

func (p *contactPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
@ -274,23 +263,12 @@ func NewContactDeltaPager(
    gs graph.Servicer,
    user, directoryID, deltaURL string,
    immutableIDs bool,
) (itemPager, error) {
    selecting, err := buildOptions([]string{"parentFolderId"}, fieldsForContacts)
    if err != nil {
        return nil, err
    }

    requestParameters := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetQueryParameters{
        Select: selecting,
    }

) itemPager {
    options := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParameters,
        Headers:         buildPreferHeaders(true, immutableIDs),
    }

    if err != nil {
        return &contactDeltaPager{}, err
        QueryParameters: &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetQueryParameters{
            Select: []string{"id", "parentFolderId"},
        },
        Headers: buildPreferHeaders(true, immutableIDs),
    }

    var builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder
@ -300,7 +278,7 @@ func NewContactDeltaPager(
        builder = getContactDeltaBuilder(ctx, gs, user, directoryID, options)
    }

    return &contactDeltaPager{gs, user, directoryID, builder, options}, nil
    return &contactDeltaPager{gs, user, directoryID, builder, options}
}

func (p *contactDeltaPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
@ -340,15 +318,8 @@ func (c Contacts) GetAddedAndRemovedItemIDs(
        "category", selectors.ExchangeContact,
        "container_id", directoryID)

    pager, err := NewContactPager(ctx, service, user, directoryID, immutableIDs)
    if err != nil {
        return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating non-delta pager")
    }

    deltaPager, err := NewContactDeltaPager(ctx, service, user, directoryID, oldDelta, immutableIDs)
    if err != nil {
        return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager")
    }
    pager := NewContactPager(ctx, service, user, directoryID, immutableIDs)
    deltaPager := NewContactDeltaPager(ctx, service, user, directoryID, oldDelta, immutableIDs)

    return getAddedAndRemovedItemIDs(ctx, service, pager, deltaPager, oldDelta, canMakeDeltaQueries)
}

11 src/pkg/services/m365/api/delta.go (new file)
@ -0,0 +1,11 @@
package api

// DeltaUpdate holds the results of a current delta token. It normally
// gets produced when aggregating the addition and removal of items in
// a delta-queryable folder.
type DeltaUpdate struct {
    // the deltaLink itself
    URL string
    // true if the old delta was marked as invalid
    Reset bool
}
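
A minimal sketch (assumed caller, not part of this diff) of how a DeltaUpdate is typically consumed after enumeration; the token store here is a stand-in for whatever persistence the caller uses.

// hypothetical illustration: persist the new delta link, or drop
// stored state when the token was reset.
func persistDelta(du DeltaUpdate, store map[string]string, folderID string) {
    if du.Reset {
        // the old token no longer applies; discard it first.
        delete(store, folderID)
    }

    if len(du.URL) > 0 {
        store[folderID] = du.URL
    }
}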

@ -3,312 +3,14 @@ package api
import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/alcionai/clues"
    abstractions "github.com/microsoft/kiota-abstractions-go"
    "github.com/microsoftgraph/msgraph-sdk-go/drives"
    "github.com/microsoftgraph/msgraph-sdk-go/models"
    "github.com/microsoftgraph/msgraph-sdk-go/sites"
    "github.com/microsoftgraph/msgraph-sdk-go/users"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/connector/graph/api"
    onedrive "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
    "github.com/alcionai/corso/src/pkg/logger"
)

func getValues[T any](l api.PageLinker) ([]T, error) {
    page, ok := l.(interface{ GetValue() []T })
    if !ok {
        return nil, clues.New("page does not comply with GetValue() interface").With("page_item_type", fmt.Sprintf("%T", l))
    }

    return page.GetValue(), nil
}

// max we can do is 999
const pageSize = int32(999)

type driveItemPager struct {
    gs      graph.Servicer
    driveID string
    builder *drives.ItemItemsItemDeltaRequestBuilder
    options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration
}

func NewItemPager(
    gs graph.Servicer,
    driveID, link string,
    fields []string,
) *driveItemPager {
    pageCount := pageSize

    headers := abstractions.NewRequestHeaders()
    preferHeaderItems := []string{
        "deltashowremovedasdeleted",
        "deltatraversepermissiongaps",
        "deltashowsharingchanges",
        "hierarchicalsharing",
    }
    headers.Add("Prefer", strings.Join(preferHeaderItems, ","))

    requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{
        Headers: headers,
        QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{
            Top:    &pageCount,
            Select: fields,
        },
    }

    res := &driveItemPager{
        gs:      gs,
        driveID: driveID,
        options: requestConfig,
        builder: gs.Client().
            Drives().
            ByDriveId(driveID).
            Items().ByDriveItemId(onedrive.RootID).Delta(),
    }

    if len(link) > 0 {
        res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, gs.Adapter())
    }

    return res
}

func (p *driveItemPager) GetPage(ctx context.Context) (api.DeltaPageLinker, error) {
    var (
        resp api.DeltaPageLinker
        err  error
    )

    resp, err = p.builder.Get(ctx, p.options)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }

    return resp, nil
}

func (p *driveItemPager) SetNext(link string) {
    p.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, p.gs.Adapter())
}

func (p *driveItemPager) Reset() {
    p.builder = p.gs.Client().
        Drives().
        ByDriveId(p.driveID).
        Items().
        ByDriveItemId(onedrive.RootID).
        Delta()
}

func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) {
    return getValues[models.DriveItemable](l)
}

type userDrivePager struct {
    userID  string
    gs      graph.Servicer
    builder *users.ItemDrivesRequestBuilder
    options *users.ItemDrivesRequestBuilderGetRequestConfiguration
}

func NewUserDrivePager(
    gs graph.Servicer,
    userID string,
    fields []string,
) *userDrivePager {
    requestConfig := &users.ItemDrivesRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemDrivesRequestBuilderGetQueryParameters{
            Select: fields,
        },
    }

    res := &userDrivePager{
        userID:  userID,
        gs:      gs,
        options: requestConfig,
        builder: gs.Client().Users().ByUserId(userID).Drives(),
    }

    return res
}

type nopUserDrivePageLinker struct {
    drive models.Driveable
}

func (nl nopUserDrivePageLinker) GetOdataNextLink() *string { return nil }

func (p *userDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
    var (
        resp api.PageLinker
        err  error
    )

    d, err := p.gs.Client().Users().ByUserId(p.userID).Drive().Get(ctx, nil)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }

    resp = &nopUserDrivePageLinker{drive: d}

    // TODO(keepers): turn back on when we can separate drive enumeration
    // from default drive lookup.

    // resp, err = p.builder.Get(ctx, p.options)
    // if err != nil {
    //     return nil, graph.Stack(ctx, err)
    // }

    return resp, nil
}

func (p *userDrivePager) SetNext(link string) {
    p.builder = users.NewItemDrivesRequestBuilder(link, p.gs.Adapter())
}

func (p *userDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
    nl, ok := l.(*nopUserDrivePageLinker)
    if !ok || nl == nil {
        return nil, clues.New(fmt.Sprintf("improper page linker struct for user drives: %T", l))
    }

    // TODO(keepers): turn back on when we can separate drive enumeration
    // from default drive lookup.

    // return getValues[models.Driveable](l)

    return []models.Driveable{nl.drive}, nil
}

type siteDrivePager struct {
    gs      graph.Servicer
    builder *sites.ItemDrivesRequestBuilder
    options *sites.ItemDrivesRequestBuilderGetRequestConfiguration
}

// NewSiteDrivePager is a constructor for creating a siteDrivePager
// fields are the associated site drive fields that are desired to be returned
// in a query. NOTE: Fields are case-sensitive. Incorrect field settings will
// cause errors during later paging.
// Available fields: https://learn.microsoft.com/en-us/graph/api/resources/drive?view=graph-rest-1.0
func NewSiteDrivePager(
    gs graph.Servicer,
    siteID string,
    fields []string,
) *siteDrivePager {
    requestConfig := &sites.ItemDrivesRequestBuilderGetRequestConfiguration{
        QueryParameters: &sites.ItemDrivesRequestBuilderGetQueryParameters{
            Select: fields,
        },
    }

    res := &siteDrivePager{
        gs:      gs,
        options: requestConfig,
        builder: gs.Client().Sites().BySiteId(siteID).Drives(),
    }

    return res
}

func (p *siteDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
    var (
        resp api.PageLinker
        err  error
    )

    resp, err = p.builder.Get(ctx, p.options)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }

    return resp, nil
}

func (p *siteDrivePager) SetNext(link string) {
    p.builder = sites.NewItemDrivesRequestBuilder(link, p.gs.Adapter())
}

func (p *siteDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
    return getValues[models.Driveable](l)
}

// DrivePager pages through different types of drive owners
type DrivePager interface {
    GetPage(context.Context) (api.PageLinker, error)
    SetNext(nextLink string)
    ValuesIn(api.PageLinker) ([]models.Driveable, error)
}

// GetAllDrives fetches all drives for the given pager
func GetAllDrives(
    ctx context.Context,
    pager DrivePager,
    retry bool,
    maxRetryCount int,
) ([]models.Driveable, error) {
    ds := []models.Driveable{}

    if !retry {
        maxRetryCount = 0
    }

    // Loop through all pages returned by Graph API.
    for {
        var (
            err  error
            page api.PageLinker
        )

        // Retry loop for drive retrieval. Requests can time out.
        for i := 0; i <= maxRetryCount; i++ {
            page, err = pager.GetPage(ctx)
            if err != nil {
                if clues.HasLabel(err, graph.LabelsMysiteNotFound) {
                    logger.Ctx(ctx).Infof("resource owner does not have a drive")
                    return make([]models.Driveable, 0), nil // no license or drives.
                }

                if graph.IsErrTimeout(err) && i < maxRetryCount {
                    time.Sleep(time.Duration(3*(i+1)) * time.Second)
                    continue
                }

                return nil, graph.Wrap(ctx, err, "retrieving drives")
            }

            // No error encountered, break the retry loop so we can extract results
            // and see if there's another page to fetch.
            break
        }

        tmp, err := pager.ValuesIn(page)
        if err != nil {
            return nil, graph.Wrap(ctx, err, "extracting drives from response")
        }

        ds = append(ds, tmp...)

        nextLink := ptr.Val(page.GetOdataNextLink())
        if len(nextLink) == 0 {
            break
        }

        pager.SetNext(nextLink)
    }

    logger.Ctx(ctx).Debugf("retrieved %d valid drives", len(ds))

    return ds, nil
}
|
||||
|
||||
// generic drive item getter
|
||||
func GetDriveItem(
|
||||
ctx context.Context,
|
||||
|
||||
325
src/pkg/services/m365/api/drive_pager.go
Normal file
@ -0,0 +1,325 @@
package api

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/alcionai/clues"
    abstractions "github.com/microsoft/kiota-abstractions-go"
    "github.com/microsoftgraph/msgraph-sdk-go/drives"
    "github.com/microsoftgraph/msgraph-sdk-go/models"
    "github.com/microsoftgraph/msgraph-sdk-go/sites"
    "github.com/microsoftgraph/msgraph-sdk-go/users"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/connector/graph/api"
    onedrive "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
    "github.com/alcionai/corso/src/pkg/logger"
)

// ---------------------------------------------------------------------------
// item pager
// ---------------------------------------------------------------------------

type driveItemPager struct {
    gs      graph.Servicer
    driveID string
    builder *drives.ItemItemsItemDeltaRequestBuilder
    options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration
}

func NewItemPager(
    gs graph.Servicer,
    driveID, link string,
    fields []string,
) *driveItemPager {
    headers := abstractions.NewRequestHeaders()
    preferHeaderItems := []string{
        "deltashowremovedasdeleted",
        "deltatraversepermissiongaps",
        "deltashowsharingchanges",
        "hierarchicalsharing",
    }
    headers.Add("Prefer", strings.Join(preferHeaderItems, ","))

    requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{
        Headers: headers,
        QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{
            Top:    ptr.To(maxPageSize),
            Select: fields,
        },
    }

    res := &driveItemPager{
        gs:      gs,
        driveID: driveID,
        options: requestConfig,
        builder: gs.Client().
            Drives().
            ByDriveId(driveID).
            Items().ByDriveItemId(onedrive.RootID).Delta(),
    }

    if len(link) > 0 {
        res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, gs.Adapter())
    }

    return res
}

func (p *driveItemPager) GetPage(ctx context.Context) (api.DeltaPageLinker, error) {
    var (
        resp api.DeltaPageLinker
        err  error
    )

    resp, err = p.builder.Get(ctx, p.options)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }

    return resp, nil
}

func (p *driveItemPager) SetNext(link string) {
    p.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, p.gs.Adapter())
}

func (p *driveItemPager) Reset() {
    p.builder = p.gs.Client().
        Drives().
        ByDriveId(p.driveID).
        Items().
        ByDriveItemId(onedrive.RootID).
        Delta()
}

func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) {
    return getValues[models.DriveItemable](l)
}

// ---------------------------------------------------------------------------
// user pager
// ---------------------------------------------------------------------------

type userDrivePager struct {
    userID  string
    gs      graph.Servicer
    builder *users.ItemDrivesRequestBuilder
    options *users.ItemDrivesRequestBuilderGetRequestConfiguration
}

func NewUserDrivePager(
    gs graph.Servicer,
    userID string,
    fields []string,
) *userDrivePager {
    requestConfig := &users.ItemDrivesRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemDrivesRequestBuilderGetQueryParameters{
            Select: fields,
        },
    }

    res := &userDrivePager{
        userID:  userID,
        gs:      gs,
        options: requestConfig,
        builder: gs.Client().Users().ByUserId(userID).Drives(),
    }

    return res
}

type nopUserDrivePageLinker struct {
    drive models.Driveable
}

func (nl nopUserDrivePageLinker) GetOdataNextLink() *string { return nil }

func (p *userDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
    var (
        resp api.PageLinker
        err  error
    )

    d, err := p.gs.Client().Users().ByUserId(p.userID).Drive().Get(ctx, nil)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }

    resp = &nopUserDrivePageLinker{drive: d}

    // TODO(keepers): turn back on when we can separate drive enumeration
    // from default drive lookup.

    // resp, err = p.builder.Get(ctx, p.options)
    // if err != nil {
    //     return nil, graph.Stack(ctx, err)
    // }

    return resp, nil
}

func (p *userDrivePager) SetNext(link string) {
    p.builder = users.NewItemDrivesRequestBuilder(link, p.gs.Adapter())
}

func (p *userDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
    nl, ok := l.(*nopUserDrivePageLinker)
    if !ok || nl == nil {
        return nil, clues.New(fmt.Sprintf("improper page linker struct for user drives: %T", l))
    }

    // TODO(keepers): turn back on when we can separate drive enumeration
    // from default drive lookup.

    // return getValues[models.Driveable](l)

    return []models.Driveable{nl.drive}, nil
}

// ---------------------------------------------------------------------------
// site pager
// ---------------------------------------------------------------------------

type siteDrivePager struct {
    gs      graph.Servicer
    builder *sites.ItemDrivesRequestBuilder
    options *sites.ItemDrivesRequestBuilderGetRequestConfiguration
}

// NewSiteDrivePager is a constructor for creating a siteDrivePager
// fields are the associated site drive fields that are desired to be returned
// in a query. NOTE: Fields are case-sensitive. Incorrect field settings will
// cause errors during later paging.
// Available fields: https://learn.microsoft.com/en-us/graph/api/resources/drive?view=graph-rest-1.0
func NewSiteDrivePager(
    gs graph.Servicer,
    siteID string,
    fields []string,
) *siteDrivePager {
    requestConfig := &sites.ItemDrivesRequestBuilderGetRequestConfiguration{
        QueryParameters: &sites.ItemDrivesRequestBuilderGetQueryParameters{
            Select: fields,
        },
    }

    res := &siteDrivePager{
        gs:      gs,
        options: requestConfig,
        builder: gs.Client().Sites().BySiteId(siteID).Drives(),
    }

    return res
}

func (p *siteDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
    var (
        resp api.PageLinker
        err  error
    )

    resp, err = p.builder.Get(ctx, p.options)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }

    return resp, nil
}

func (p *siteDrivePager) SetNext(link string) {
    p.builder = sites.NewItemDrivesRequestBuilder(link, p.gs.Adapter())
}

func (p *siteDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
    return getValues[models.Driveable](l)
}

// ---------------------------------------------------------------------------
// drive pager
// ---------------------------------------------------------------------------

// DrivePager pages through different types of drive owners
type DrivePager interface {
    GetPage(context.Context) (api.PageLinker, error)
    SetNext(nextLink string)
    ValuesIn(api.PageLinker) ([]models.Driveable, error)
}

// GetAllDrives fetches all drives for the given pager
func GetAllDrives(
    ctx context.Context,
    pager DrivePager,
    retry bool,
    maxRetryCount int,
) ([]models.Driveable, error) {
    ds := []models.Driveable{}

    if !retry {
        maxRetryCount = 0
    }

    // Loop through all pages returned by Graph API.
    for {
        var (
            err  error
            page api.PageLinker
        )

        // Retry Loop for Drive retrieval. Request can timeout
        for i := 0; i <= maxRetryCount; i++ {
            page, err = pager.GetPage(ctx)
            if err != nil {
                if clues.HasLabel(err, graph.LabelsMysiteNotFound) {
                    logger.Ctx(ctx).Infof("resource owner does not have a drive")
                    return make([]models.Driveable, 0), nil // no license or drives.
                }

                if graph.IsErrTimeout(err) && i < maxRetryCount {
                    time.Sleep(time.Duration(3*(i+1)) * time.Second)
                    continue
                }

                return nil, graph.Wrap(ctx, err, "retrieving drives")
            }

            // No error encountered, break the retry loop so we can extract results
            // and see if there's another page to fetch.
            break
        }

        tmp, err := pager.ValuesIn(page)
        if err != nil {
            return nil, graph.Wrap(ctx, err, "extracting drives from response")
        }

        ds = append(ds, tmp...)

        nextLink := ptr.Val(page.GetOdataNextLink())
        if len(nextLink) == 0 {
            break
        }

        pager.SetNext(nextLink)
    }

    logger.Ctx(ctx).Debugf("retrieved %d valid drives", len(ds))

    return ds, nil
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

func getValues[T any](l api.PageLinker) ([]T, error) {
    page, ok := l.(interface{ GetValue() []T })
    if !ok {
        return nil, clues.New("page does not comply with GetValue() interface").With("page_item_type", fmt.Sprintf("%T", l))
    }

    return page.GetValue(), nil
}
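
To show how these pieces fit together, here is a minimal usage sketch, not part of the commit: it assumes an already-initialized graph.Servicer and enumerates a site's drives through GetAllDrives, retrying timed-out page fetches up to three times.

// Sketch only, not part of this commit: list the drives on a site.
// Assumes gs is an already-configured graph.Servicer.
func listSiteDrives(ctx context.Context, gs graph.Servicer, siteID string) error {
    // "id" and "name" are valid drive fields per the resource docs linked
    // above; NewSiteDrivePager selects them on every page request.
    pager := NewSiteDrivePager(gs, siteID, []string{"id", "name"})

    // retry=true, maxRetryCount=3: retry timed-out page fetches.
    drives, err := GetAllDrives(ctx, pager, true, 3)
    if err != nil {
        return err
    }

    for _, d := range drives {
        logger.Ctx(ctx).Debugf("drive %s: %s", ptr.Val(d.GetId()), ptr.Val(d.GetName()))
    }

    return nil
}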
@ -84,12 +84,13 @@ func (c Events) GetContainerByID(
        return nil, graph.Stack(ctx, err)
    }

    ofc, err := optionsForCalendarsByID([]string{"name", "owner"})
    if err != nil {
        return nil, graph.Wrap(ctx, err, "setting event calendar options")
    queryParams := &users.ItemCalendarsCalendarItemRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemCalendarsCalendarItemRequestBuilderGetQueryParameters{
            Select: []string{"id", "name", "owner"},
        },
    }

    cal, err := service.Client().Users().ByUserId(userID).Calendars().ByCalendarId(containerID).Get(ctx, ofc)
    cal, err := service.Client().Users().ByUserId(userID).Calendars().ByCalendarId(containerID).Get(ctx, queryParams)
    if err != nil {
        return nil, graph.Stack(ctx, err).WithClues(ctx)
    }
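
One detail worth noting here and in the similar hunks below: the removed options helpers (deleted later in this diff) always prepended "id" to the selection via buildOptions, so each inlined Select list now has to name "id" explicitly. A caller wanting to keep that guarantee in one place could use a tiny helper like this hypothetical one, which is not part of the commit:

// Hypothetical, for illustration only: restores the implicit "id"
// selection that the removed buildOptions helper used to provide.
func withID(fields ...string) []string {
    return append([]string{"id"}, fields...)
}

With it, the literal above would read Select: withID("name", "owner"), producing []string{"id", "name", "owner"}.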
@ -129,7 +130,7 @@ func (c Events) GetContainerByName(
    cal := resp.GetValue()[0]
    cd := CalendarDisplayable{Calendarable: cal}

    if err := checkIDAndName(cd); err != nil {
    if err := graph.CheckIDAndName(cd); err != nil {
        return nil, err
    }
@ -191,7 +192,7 @@ func (c Events) GetItem(
func (c Events) EnumerateContainers(
    ctx context.Context,
    userID, baseDirID string,
    fn func(graph.CacheFolder) error,
    fn func(graph.CachedContainer) error,
    errs *fault.Bus,
) error {
    service, err := c.Service()
@ -199,9 +200,10 @@ func (c Events) EnumerateContainers(
        return graph.Stack(ctx, err)
    }

    ofc, err := optionsForCalendars([]string{"name"})
    if err != nil {
        return graph.Wrap(ctx, err, "setting calendar options")
    queryParams := &users.ItemCalendarsRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemCalendarsRequestBuilderGetQueryParameters{
            Select: []string{"id", "name"},
        },
    }

    el := errs.Local()
@ -212,7 +214,7 @@ func (c Events) EnumerateContainers(
            break
        }

        resp, err := builder.Get(ctx, ofc)
        resp, err := builder.Get(ctx, queryParams)
        if err != nil {
            return graph.Stack(ctx, err)
        }
@ -223,7 +225,7 @@ func (c Events) EnumerateContainers(
        }

        cd := CalendarDisplayable{Calendarable: cal}
        if err := checkIDAndName(cd); err != nil {
        if err := graph.CheckIDAndName(cd); err != nil {
            errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation))
            continue
        }
@ -237,7 +239,7 @@ func (c Events) EnumerateContainers(
            cd,
            path.Builder{}.Append(ptr.Val(cd.GetId())),          // storage path
            path.Builder{}.Append(ptr.Val(cd.GetDisplayName()))) // display location
        if err := fn(temp); err != nil {
        if err := fn(&temp); err != nil {
            errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation))
            continue
        }
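
Since the callback now receives the graph.CachedContainer interface rather than a concrete graph.CacheFolder, callers can collect containers without caring about the backing type. A minimal sketch, not from the commit, assuming an initialized api.Events client:

// Sketch only: gather every calendar container via the new
// CachedContainer callback. Assumes c is a ready Events client and
// errs is the caller's *fault.Bus.
func collectCalendars(
    ctx context.Context,
    c Events,
    userID string,
    errs *fault.Bus,
) ([]graph.CachedContainer, error) {
    var containers []graph.CachedContainer

    err := c.EnumerateContainers(
        ctx,
        userID,
        "", // baseDirID: empty for illustration; real call sites pass their own
        func(cc graph.CachedContainer) error {
            containers = append(containers, cc)
            return nil
        },
        errs)
    if err != nil {
        return nil, err
    }

    return containers, nil
}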
@ -123,9 +123,10 @@ func (c Mail) GetContainerByID(
        return nil, graph.Stack(ctx, err)
    }

    ofmf, err := optionsForMailFoldersItem([]string{"displayName", "parentFolderId"})
    if err != nil {
        return nil, graph.Wrap(ctx, err, "setting mail folder options")
    queryParams := &users.ItemMailFoldersMailFolderItemRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemMailFoldersMailFolderItemRequestBuilderGetQueryParameters{
            Select: []string{"id", "displayName", "parentFolderId"},
        },
    }

    resp, err := service.Client().
@ -133,7 +134,7 @@ func (c Mail) GetContainerByID(
        ByUserId(userID).
        MailFolders().
        ByMailFolderId(dirID).
        Get(ctx, ofmf)
        Get(ctx, queryParams)
    if err != nil {
        return nil, graph.Stack(ctx, err)
    }
@ -308,7 +309,7 @@ func (p *mailFolderPager) valuesIn(pl api.PageLinker) ([]models.MailFolderable,
func (c Mail) EnumerateContainers(
    ctx context.Context,
    userID, baseDirID string,
    fn func(graph.CacheFolder) error,
    fn func(graph.CachedContainer) error,
    errs *fault.Bus,
) error {
    service, err := c.Service()
@ -346,7 +347,7 @@ func (c Mail) EnumerateContainers(
        "container_name", ptr.Val(v.GetDisplayName()))

    temp := graph.NewCacheFolder(v, nil, nil)
    if err := fn(temp); err != nil {
    if err := fn(&temp); err != nil {
        errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation))
        continue
    }
@ -380,23 +381,12 @@ func NewMailPager(
    gs graph.Servicer,
    user, directoryID string,
    immutableIDs bool,
) (itemPager, error) {
    selecting, err := buildOptions([]string{"isRead"}, fieldsForMessages)
    if err != nil {
        return nil, err
    }

    requestParameters := &users.ItemMailFoldersItemMessagesRequestBuilderGetQueryParameters{
        Select: selecting,
    }

    options := &users.ItemMailFoldersItemMessagesRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParameters,
        Headers:         buildPreferHeaders(true, immutableIDs),
    }

    if err != nil {
        return &mailPager{}, err
) itemPager {
    queryParams := &users.ItemMailFoldersItemMessagesRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemMailFoldersItemMessagesRequestBuilderGetQueryParameters{
            Select: []string{"id", "isRead"},
        },
        Headers: buildPreferHeaders(true, immutableIDs),
    }

    builder := gs.Client().
@ -406,7 +396,7 @@ func NewMailPager(
        ByMailFolderId(directoryID).
        Messages()

    return &mailPager{gs, builder, options}, nil
    return &mailPager{gs, builder, queryParams}
}

func (p *mailPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
@ -466,23 +456,12 @@ func NewMailDeltaPager(
    gs graph.Servicer,
    user, directoryID, oldDelta string,
    immutableIDs bool,
) (itemPager, error) {
    selecting, err := buildOptions([]string{"isRead"}, fieldsForMessages)
    if err != nil {
        return nil, err
    }

    requestParameters := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{
        Select: selecting,
    }

    options := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParameters,
        Headers:         buildPreferHeaders(true, immutableIDs),
    }

    if err != nil {
        return &mailDeltaPager{}, err
) itemPager {
    queryParams := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{
        QueryParameters: &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{
            Select: []string{"id", "isRead"},
        },
        Headers: buildPreferHeaders(true, immutableIDs),
    }

    var builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder
@ -490,10 +469,10 @@ func NewMailDeltaPager(
    if len(oldDelta) > 0 {
        builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, gs.Adapter())
    } else {
        builder = getMailDeltaBuilder(ctx, gs, user, directoryID, options)
        builder = getMailDeltaBuilder(ctx, gs, user, directoryID, queryParams)
    }

    return &mailDeltaPager{gs, user, directoryID, builder, options}, nil
    return &mailDeltaPager{gs, user, directoryID, builder, queryParams}
}

func (p *mailDeltaPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
@ -539,15 +518,8 @@ func (c Mail) GetAddedAndRemovedItemIDs(
        "category", selectors.ExchangeMail,
        "container_id", directoryID)

    pager, err := NewMailPager(ctx, service, user, directoryID, immutableIDs)
    if err != nil {
        return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager")
    }

    deltaPager, err := NewMailDeltaPager(ctx, service, user, directoryID, oldDelta, immutableIDs)
    if err != nil {
        return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager")
    }
    pager := NewMailPager(ctx, service, user, directoryID, immutableIDs)
    deltaPager := NewMailDeltaPager(ctx, service, user, directoryID, oldDelta, immutableIDs)

    return getAddedAndRemovedItemIDs(ctx, service, pager, deltaPager, oldDelta, canMakeDeltaQueries)
}
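
Both pagers are handed to getAddedAndRemovedItemIDs, which, per its oldDelta and canMakeDeltaQueries parameters, can fall back from delta queries to full enumeration. The selection idea, reduced to a purely hypothetical toy that is not the commit's implementation:

// Hypothetical toy, for illustration only: prefer the delta pager when
// delta queries are permitted for the mailbox, else use the plain pager.
func choosePager(canMakeDeltaQueries bool, pager, deltaPager itemPager) itemPager {
    if canMakeDeltaQueries {
        return deltaPager
    }

    return pager
}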
@ -1,214 +0,0 @@
package api

import (
    "fmt"
    "strings"

    "github.com/alcionai/clues"
    abstractions "github.com/microsoft/kiota-abstractions-go"
    "github.com/microsoftgraph/msgraph-sdk-go/users"
)

// -----------------------------------------------------------------------
// Constant Section
// Defines the allowable strings that can be passed into
// selectors for M365 objects
// -----------------------------------------------------------------------
var (
    fieldsForCalendars = map[string]struct{}{
        "changeKey":         {},
        "events":            {},
        "id":                {},
        "isDefaultCalendar": {},
        "name":              {},
        "owner":             {},
    }

    fieldsForFolders = map[string]struct{}{
        "childFolderCount": {},
        "displayName":      {},
        "id":               {},
        "isHidden":         {},
        "parentFolderId":   {},
        "totalItemCount":   {},
        "unreadItemCount":  {},
    }

    fieldsForMessages = map[string]struct{}{
        "conservationId":    {},
        "conversationIndex": {},
        "parentFolderId":    {},
        "subject":           {},
        "webLink":           {},
        "id":                {},
        "isRead":            {},
    }

    fieldsForContacts = map[string]struct{}{
        "id":             {},
        "companyName":    {},
        "department":     {},
        "displayName":    {},
        "fileAs":         {},
        "givenName":      {},
        "manager":        {},
        "parentFolderId": {},
    }
)

const (
    // headerKeyPrefer is used to set query preferences
    headerKeyPrefer = "Prefer"
    // maxPageSizeHeaderFmt is used to indicate max page size
    // preferences
    maxPageSizeHeaderFmt = "odata.maxpagesize=%d"
    // deltaMaxPageSize is the max page size to use for delta queries
    deltaMaxPageSize = 200
    idTypeFmt        = "IdType=%q"
    immutableIDType  = "ImmutableId"
)

// -----------------------------------------------------------------------
// exchange.Query Option Section
// These functions can be used to filter a response on M365
// Graph queries and reduce / filter the amount of data returned
// which reduces the overall latency of complex calls
// -----------------------------------------------------------------------

// optionsForCalendars places allowed options for exchange.Calendar object
// @param moreOps should reflect elements from fieldsForCalendars
// @return is first call in Calendars().GetWithRequestConfigurationAndResponseHandler
func optionsForCalendars(moreOps []string) (
    *users.ItemCalendarsRequestBuilderGetRequestConfiguration,
    error,
) {
    selecting, err := buildOptions(moreOps, fieldsForCalendars)
    if err != nil {
        return nil, err
    }
    // should be a CalendarsRequestBuilderGetRequestConfiguration
    requestParams := &users.ItemCalendarsRequestBuilderGetQueryParameters{
        Select: selecting,
    }
    options := &users.ItemCalendarsRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParams,
    }

    return options, nil
}

// optionsForCalendarsByID places allowed options for exchange.Calendar object
// @param moreOps should reflect elements from fieldsForCalendars
// @return is first call in Calendars().GetWithRequestConfigurationAndResponseHandler
func optionsForCalendarsByID(moreOps []string) (
    *users.ItemCalendarsCalendarItemRequestBuilderGetRequestConfiguration,
    error,
) {
    selecting, err := buildOptions(moreOps, fieldsForCalendars)
    if err != nil {
        return nil, err
    }
    // should be a CalendarsRequestBuilderGetRequestConfiguration
    requestParams := &users.ItemCalendarsCalendarItemRequestBuilderGetQueryParameters{
        Select: selecting,
    }
    options := &users.ItemCalendarsCalendarItemRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParams,
    }

    return options, nil
}

func optionsForContactFolderByID(moreOps []string) (
    *users.ItemContactFoldersContactFolderItemRequestBuilderGetRequestConfiguration,
    error,
) {
    selecting, err := buildOptions(moreOps, fieldsForFolders)
    if err != nil {
        return nil, err
    }

    requestParameters := &users.ItemContactFoldersContactFolderItemRequestBuilderGetQueryParameters{
        Select: selecting,
    }
    options := &users.ItemContactFoldersContactFolderItemRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParameters,
    }

    return options, nil
}

// optionsForMailFoldersItem transforms the options into a more dynamic call for MailFoldersById.
// moreOps is a []string of options(e.g. "displayName", "isHidden")
// Returns first call in MailFoldersById().GetWithRequestConfigurationAndResponseHandler(options, handler)
func optionsForMailFoldersItem(
    moreOps []string,
) (*users.ItemMailFoldersMailFolderItemRequestBuilderGetRequestConfiguration, error) {
    selecting, err := buildOptions(moreOps, fieldsForFolders)
    if err != nil {
        return nil, err
    }

    requestParameters := &users.ItemMailFoldersMailFolderItemRequestBuilderGetQueryParameters{
        Select: selecting,
    }
    options := &users.ItemMailFoldersMailFolderItemRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParameters,
    }

    return options, nil
}

// optionsForContactChildFolders builds a contacts child folders request.
func optionsForContactChildFolders(
    moreOps []string,
) (*users.ItemContactFoldersItemChildFoldersRequestBuilderGetRequestConfiguration, error) {
    selecting, err := buildOptions(moreOps, fieldsForContacts)
    if err != nil {
        return nil, err
    }

    requestParameters := &users.ItemContactFoldersItemChildFoldersRequestBuilderGetQueryParameters{
        Select: selecting,
    }
    options := &users.ItemContactFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
        QueryParameters: requestParameters,
    }

    return options, nil
}

// buildOptions - Utility Method for verifying if select options are valid for the m365 object type
// @return is a pair. The first is a string literal of allowable options based on the object type,
// the second is an error. An error is returned if an unsupported option or optionIdentifier was used
func buildOptions(fields []string, allowed map[string]struct{}) ([]string, error) {
    returnedOptions := []string{"id"}

    for _, entry := range fields {
        _, ok := allowed[entry]
        if !ok {
            return nil, clues.New("unsupported field: " + entry)
        }
    }

    return append(returnedOptions, fields...), nil
}

// buildPreferHeaders returns the headers we add to item delta page
// requests.
func buildPreferHeaders(pageSize, immutableID bool) *abstractions.RequestHeaders {
    var allHeaders []string

    if pageSize {
        allHeaders = append(allHeaders, fmt.Sprintf(maxPageSizeHeaderFmt, deltaMaxPageSize))
    }

    if immutableID {
        allHeaders = append(allHeaders, fmt.Sprintf(idTypeFmt, immutableIDType))
    }

    headers := abstractions.NewRequestHeaders()
    headers.Add(headerKeyPrefer, strings.Join(allHeaders, ","))

    return headers
}
26
src/pkg/services/m365/api/query_params.go
Normal file
@ -0,0 +1,26 @@
package api

import (
    "fmt"
    "strings"

    abstractions "github.com/microsoft/kiota-abstractions-go"
)

// buildPreferHeaders returns the headers we add to item delta page requests.
func buildPreferHeaders(pageSize, immutableID bool) *abstractions.RequestHeaders {
    var allHeaders []string

    if pageSize {
        allHeaders = append(allHeaders, fmt.Sprintf("odata.maxpagesize=%d", maxPageSize))
    }

    if immutableID {
        allHeaders = append(allHeaders, `IdType="ImmutableId"`)
    }

    headers := abstractions.NewRequestHeaders()
    headers.Add("Prefer", strings.Join(allHeaders, ","))

    return headers
}
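
For a concrete sense of the output: with both flags set, the helper emits a single combined Prefer value. Illustration only; maxPageSize is a constant defined elsewhere in this package, so the 500 below is just a stand-in:

// Illustration only, not part of the commit.
func examplePreferHeaders() {
    // Assuming maxPageSize were 500 (a stand-in value), the request
    // would carry one header:
    //
    //     Prefer: odata.maxpagesize=500,IdType="ImmutableId"
    headers := buildPreferHeaders(true, true)
    _ = headers // attached via a request configuration's Headers field
}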
@ -14,6 +14,8 @@ import (

    "github.com/alcionai/corso/src/internal/common/idname"
    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/common/str"
    "github.com/alcionai/corso/src/internal/common/tform"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
@ -394,90 +396,90 @@ func (c Users) getMailboxSettings(

    additionalData := settings.GetAdditionalData()

    mi.ArchiveFolder, err = toString(ctx, "archiveFolder", additionalData)
    mi.ArchiveFolder, err = str.FromMapToAny("archiveFolder", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.Timezone, err = toString(ctx, "timeZone", additionalData)
    mi.Timezone, err = str.FromMapToAny("timeZone", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.DateFormat, err = toString(ctx, "dateFormat", additionalData)
    mi.DateFormat, err = str.FromMapToAny("dateFormat", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.TimeFormat, err = toString(ctx, "timeFormat", additionalData)
    mi.TimeFormat, err = str.FromMapToAny("timeFormat", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.Purpose, err = toString(ctx, "userPurpose", additionalData)
    mi.Purpose, err = str.FromMapToAny("userPurpose", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.DelegateMeetMsgDeliveryOpt, err = toString(ctx, "delegateMeetingMessageDeliveryOptions", additionalData)
    mi.DelegateMeetMsgDeliveryOpt, err = str.FromMapToAny("delegateMeetingMessageDeliveryOptions", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    // decode automatic replies settings
    replySetting, err := toT[map[string]any](ctx, "automaticRepliesSetting", additionalData)
    replySetting, err := tform.FromMapToAny[map[string]any]("automaticRepliesSetting", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.Status, err = toString(ctx, "status", replySetting)
    mi.AutomaticRepliesSetting.Status, err = str.FromMapToAny("status", replySetting)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.ExternalAudience, err = toString(ctx, "externalAudience", replySetting)
    mi.AutomaticRepliesSetting.ExternalAudience, err = str.FromMapToAny("externalAudience", replySetting)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.ExternalReplyMessage, err = toString(ctx, "externalReplyMessage", replySetting)
    mi.AutomaticRepliesSetting.ExternalReplyMessage, err = str.FromMapToAny("externalReplyMessage", replySetting)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.InternalReplyMessage, err = toString(ctx, "internalReplyMessage", replySetting)
    mi.AutomaticRepliesSetting.InternalReplyMessage, err = str.FromMapToAny("internalReplyMessage", replySetting)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    // decode scheduledStartDateTime
    startDateTime, err := toT[map[string]any](ctx, "scheduledStartDateTime", replySetting)
    startDateTime, err := tform.FromMapToAny[map[string]any]("scheduledStartDateTime", replySetting)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.ScheduledStartDateTime.DateTime, err = toString(ctx, "dateTime", startDateTime)
    mi.AutomaticRepliesSetting.ScheduledStartDateTime.DateTime, err = str.FromMapToAny("dateTime", startDateTime)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.ScheduledStartDateTime.Timezone, err = toString(ctx, "timeZone", startDateTime)
    mi.AutomaticRepliesSetting.ScheduledStartDateTime.Timezone, err = str.FromMapToAny("timeZone", startDateTime)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    endDateTime, err := toT[map[string]any](ctx, "scheduledEndDateTime", replySetting)
    endDateTime, err := tform.FromMapToAny[map[string]any]("scheduledEndDateTime", replySetting)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.ScheduledEndDateTime.DateTime, err = toString(ctx, "dateTime", endDateTime)
    mi.AutomaticRepliesSetting.ScheduledEndDateTime.DateTime, err = str.FromMapToAny("dateTime", endDateTime)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.AutomaticRepliesSetting.ScheduledEndDateTime.Timezone, err = toString(ctx, "timeZone", endDateTime)
    mi.AutomaticRepliesSetting.ScheduledEndDateTime.Timezone, err = str.FromMapToAny("timeZone", endDateTime)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    // Language decode
    language, err := toT[map[string]any](ctx, "language", additionalData)
    language, err := tform.FromMapToAny[map[string]any]("language", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.Language.DisplayName, err = toString(ctx, "displayName", language)
    mi.Language.DisplayName, err = str.FromMapToAny("displayName", language)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.Language.Locale, err = toString(ctx, "locale", language)
    mi.Language.Locale, err = str.FromMapToAny("locale", language)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    // working hours
    workingHours, err := toT[map[string]any](ctx, "workingHours", additionalData)
    workingHours, err := tform.FromMapToAny[map[string]any]("workingHours", additionalData)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.WorkingHours.StartTime, err = toString(ctx, "startTime", workingHours)
    mi.WorkingHours.StartTime, err = str.FromMapToAny("startTime", workingHours)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.WorkingHours.EndTime, err = toString(ctx, "endTime", workingHours)
    mi.WorkingHours.EndTime, err = str.FromMapToAny("endTime", workingHours)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    timeZone, err := toT[map[string]any](ctx, "timeZone", workingHours)
    timeZone, err := tform.FromMapToAny[map[string]any]("timeZone", workingHours)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    mi.WorkingHours.TimeZone.Name, err = toString(ctx, "name", timeZone)
    mi.WorkingHours.TimeZone.Name, err = str.FromMapToAny("name", timeZone)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    days, err := toT[[]any](ctx, "daysOfWeek", workingHours)
    days, err := tform.FromMapToAny[[]any]("daysOfWeek", workingHours)
    mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

    for _, day := range days {
        s, err := anyToString(ctx, "dayOfTheWeek", day)
        s, err := str.FromAny(day)
        mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
        mi.WorkingHours.DaysOfWeek = append(mi.WorkingHours.DaysOfWeek, s)
    }
@ -510,53 +512,3 @@ func validateUser(item models.Userable) error {

    return nil
}

func toString(ctx context.Context, key string, data map[string]any) (string, error) {
    ctx = clues.Add(ctx, "setting_name", key)

    if len(data) == 0 {
        logger.Ctx(ctx).Info("not found: ", key)
        return "", ErrMailBoxSettingsNotFound
    }

    return anyToString(ctx, key, data[key])
}

func anyToString(ctx context.Context, key string, val any) (string, error) {
    if val == nil {
        logger.Ctx(ctx).Info("nil value: ", key)
        return "", ErrMailBoxSettingsNotFound
    }

    sp, ok := val.(*string)
    if !ok {
        logger.Ctx(ctx).Info("value is not a *string: ", key)
        return "", ErrMailBoxSettingsNotFound
    }

    return ptr.Val(sp), nil
}

func toT[T any](ctx context.Context, key string, data map[string]any) (T, error) {
    ctx = clues.Add(ctx, "setting_name", key)

    if len(data) == 0 {
        logger.Ctx(ctx).Info("not found: ", key)
        return *new(T), ErrMailBoxSettingsNotFound
    }

    val := data[key]

    if data == nil {
        logger.Ctx(ctx).Info("nil value: ", key)
        return *new(T), ErrMailBoxSettingsNotFound
    }

    value, ok := val.(T)
    if !ok {
        logger.Ctx(ctx).Info(fmt.Sprintf("unexpected type for %s: %T", key, val))
        return *new(T), ErrMailBoxSettingsNotFound
    }

    return value, nil
}
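
The deleted helpers above were replaced by shared equivalents in common/str and common/tform; their shapes are inferred here from the call sites, not from the helpers' own files. A toy illustration, assuming str.FromMapToAny unwraps *string values the way the removed toString did:

// Toy illustration with a literal map standing in for the Graph SDK's
// additionalData. Signatures are inferred from the call sites above.
func exampleMailboxSetting() {
    data := map[string]any{"timeZone": ptr.To("UTC")}

    tz, err := str.FromMapToAny("timeZone", data)
    if err == nil {
        fmt.Println(tz) // UTC
    }
}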
@ -2,13 +2,16 @@ package m365

import (
    "context"
    "net/http"

    "github.com/alcionai/clues"
    "github.com/microsoftgraph/msgraph-sdk-go/models"
    "github.com/microsoftgraph/msgraph-sdk-go/users"

    "github.com/alcionai/corso/src/internal/common/idname"
    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/discovery"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/pkg/account"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
@ -47,12 +50,12 @@ type UserNoInfo struct {
func UsersCompat(ctx context.Context, acct account.Account) ([]*User, error) {
    errs := fault.New(true)

    users, err := Users(ctx, acct, errs)
    us, err := Users(ctx, acct, errs)
    if err != nil {
        return nil, err
    }

    return users, errs.Failure()
    return us, errs.Failure()
}

// UsersCompatNoInfo returns a list of users in the specified M365 tenant.
@ -61,12 +64,71 @@ func UsersCompat(ctx context.Context, acct account.Account) ([]*User, error) {
func UsersCompatNoInfo(ctx context.Context, acct account.Account) ([]*UserNoInfo, error) {
    errs := fault.New(true)

    users, err := usersNoInfo(ctx, acct, errs)
    us, err := usersNoInfo(ctx, acct, errs)
    if err != nil {
        return nil, err
    }

    return users, errs.Failure()
    return us, errs.Failure()
}

// UserHasMailbox returns true if the user has an Exchange mailbox enabled,
// false otherwise, and an error in case of failure
func UserHasMailbox(ctx context.Context, acct account.Account, userID string) (bool, error) {
    uapi, err := makeUserAPI(acct)
    if err != nil {
        return false, clues.Wrap(err, "getting mailbox").WithClues(ctx)
    }

    requestParameters := users.ItemMailFoldersRequestBuilderGetQueryParameters{
        Select: []string{"id"},
        Top:    ptr.To[int32](1), // if we get any folders, then we have access.
    }

    options := users.ItemMailFoldersRequestBuilderGetRequestConfiguration{
        QueryParameters: &requestParameters,
    }

    _, err = uapi.GetMailFolders(ctx, userID, options)
    if err != nil {
        if graph.IsErrUserNotFound(err) {
            return false, clues.Stack(graph.ErrResourceOwnerNotFound, err)
        }

        if !graph.IsErrExchangeMailFolderNotFound(err) ||
            clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) {
            return false, err
        }

        return false, nil
    }

    return true, nil
}

// UserHasDrives returns true if the user has any drives,
// false otherwise, and an error in case of failure
func UserHasDrives(ctx context.Context, acct account.Account, userID string) (bool, error) {
    uapi, err := makeUserAPI(acct)
    if err != nil {
        return false, clues.Wrap(err, "getting drives").WithClues(ctx)
    }

    _, err = uapi.GetDrives(ctx, userID)
    if err != nil {
        if graph.IsErrUserNotFound(err) {
            return false, clues.Stack(graph.ErrResourceOwnerNotFound, err)
        }

        if !graph.IsErrExchangeMailFolderNotFound(err) ||
            clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) {
            return false, err
        }

        return false, nil
    }

    return true, nil
}
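
A short usage sketch for the two new checks, not part of the commit: gate per-service backup on whether the user actually has data to protect.

// Sketch only: combine the new capability checks ahead of a backup.
// Assumes acct is a configured account.Account.
func hasAnyM365Data(ctx context.Context, acct account.Account, userID string) (bool, error) {
    mailbox, err := UserHasMailbox(ctx, acct, userID)
    if err != nil {
        return false, err
    }

    drives, err := UserHasDrives(ctx, acct, userID)
    if err != nil {
        return false, err
    }

    return mailbox || drives, nil
}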
// usersNoInfo returns a list of users in the specified M365 tenant - with no info
@ -78,14 +140,14 @@ func usersNoInfo(ctx context.Context, acct account.Account, errs *fault.Bus) ([]
        return nil, clues.Wrap(err, "getting users").WithClues(ctx)
    }

    users, err := discovery.Users(ctx, uapi, errs)
    us, err := discovery.Users(ctx, uapi, errs)
    if err != nil {
        return nil, err
    }

    ret := make([]*UserNoInfo, 0, len(users))
    ret := make([]*UserNoInfo, 0, len(us))

    for _, u := range users {
    for _, u := range us {
        pu, err := parseUser(u)
        if err != nil {
            return nil, clues.Wrap(err, "formatting user data")
@ -110,14 +172,14 @@ func Users(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*User,
        return nil, clues.Wrap(err, "getting users").WithClues(ctx)
    }

    users, err := discovery.Users(ctx, uapi, errs)
    us, err := discovery.Users(ctx, uapi, errs)
    if err != nil {
        return nil, err
    }

    ret := make([]*User, 0, len(users))
    ret := make([]*User, 0, len(us))

    for _, u := range users {
    for _, u := range us {
        pu, err := parseUser(u)
        if err != nil {
            return nil, clues.Wrap(err, "formatting user data")
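
The users-to-us renames in this file are not cosmetic: the msgraph users package is now imported at the top of the file, and a local variable with the same name would shadow it for the rest of the scope. A toy illustration with hypothetical names:

// Toy illustration: with the local renamed to us, the imported msgraph
// users package stays reachable inside the same function.
func exampleNoShadowing() {
    us := []string{"alice", "bob"} // was: users := ...

    params := users.ItemMailFoldersRequestBuilderGetQueryParameters{
        Select: []string{"id"}, // users here resolves to the package
    }

    _, _ = us, params
}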
@ -102,6 +102,36 @@ func (suite *M365IntegrationSuite) TestGetUserInfo() {
    assert.Equal(t, "user", info.Mailbox.Purpose)
}

func (suite *M365IntegrationSuite) TestUserHasMailbox() {
    ctx, flush := tester.NewContext()
    defer flush()

    var (
        t    = suite.T()
        acct = tester.NewM365Account(t)
        uid  = tester.M365UserID(t)
    )

    enabled, err := m365.UserHasMailbox(ctx, acct, uid)
    require.NoError(t, err, clues.ToCore(err))
    assert.True(t, enabled)
}

func (suite *M365IntegrationSuite) TestUserHasDrive() {
    ctx, flush := tester.NewContext()
    defer flush()

    var (
        t    = suite.T()
        acct = tester.NewM365Account(t)
        uid  = tester.M365UserID(t)
    )

    enabled, err := m365.UserHasDrives(ctx, acct, uid)
    require.NoError(t, err, clues.ToCore(err))
    assert.True(t, enabled)
}

func (suite *M365IntegrationSuite) TestSites() {
    ctx, flush := tester.NewContext()
    defer flush()
@ -6,6 +6,7 @@ import (
    "github.com/alcionai/clues"

    "github.com/alcionai/corso/src/internal/common"
    "github.com/alcionai/corso/src/internal/common/str"
)

type S3Config struct {
@ -68,8 +69,8 @@ func (s Storage) S3Config() (S3Config, error) {
        c.Bucket = orEmptyString(s.Config[keyS3Bucket])
        c.Endpoint = orEmptyString(s.Config[keyS3Endpoint])
        c.Prefix = orEmptyString(s.Config[keyS3Prefix])
        c.DoNotUseTLS = common.ParseBool(s.Config[keyS3DoNotUseTLS])
        c.DoNotVerifyTLS = common.ParseBool(s.Config[keyS3DoNotVerifyTLS])
        c.DoNotUseTLS = str.ParseBool(s.Config[keyS3DoNotUseTLS])
        c.DoNotVerifyTLS = str.ParseBool(s.Config[keyS3DoNotVerifyTLS])
    }

    return c, c.validate()
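
str.ParseBool returns a bare bool per the call sites above, so absent or malformed keys simply read as false. A toy illustration with hypothetical key names standing in for the keyS3* constants:

// Toy illustration; the literal key names below are hypothetical
// stand-ins for the keyS3* constants defined elsewhere in this file.
func exampleParseBool() {
    cfg := map[string]string{"donotusetls": "true"}

    doNotUseTLS := str.ParseBool(cfg["donotusetls"])       // true
    doNotVerifyTLS := str.ParseBool(cfg["donotverifytls"]) // false: key absent
    _, _ = doNotUseTLS, doNotVerifyTLS
}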
Binary file not shown.
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 224 KiB
Binary file not shown.
Before Width: | Height: | Size: 34 KiB After Width: | Height: | Size: 232 KiB
@ -6,8 +6,8 @@ CUSTOM TO THE NEW HOME PAGE
@import "icons.scss";
@import url('https://fonts.googleapis.com/css2?family=Nunito&display=swap');

* {
  font-family: 'Nunito', sans-serif !important;
:root {
  --ifm-font-family-base: 'Nunito', sans-serif;
}

html {