Merge branch 'main' of https://github.com/alcionai/corso into updateKopiaPassword
commit 76fbbb19ea

53  .github/actions/backup-restore-test/action.yml  (vendored)
@@ -45,6 +45,9 @@ runs:
      shell: bash
      working-directory: src
      run: |
        echo "---------------------------"
        echo Backup ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-${{ inputs.service }}-${{ inputs.kind }}.log
        ./corso backup create '${{ inputs.service }}' \
@@ -61,6 +64,9 @@ runs:
      shell: bash
      working-directory: src
      run: |
        echo "---------------------------"
        echo Restore ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{ inputs.kind }}.log
        ./corso restore '${{ inputs.service }}' \
@@ -85,11 +91,14 @@ runs:
        SANITY_TEST_KIND: restore
        SANITY_TEST_FOLDER: ${{ steps.restore.outputs.result }}
        SANITY_TEST_SERVICE: ${{ inputs.service }}
        TEST_DATA: ${{ inputs.test-folder }}
        BASE_BACKUP: ${{ inputs.base-backup }}
        SANITY_TEST_DATA: ${{ inputs.test-folder }}
        SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
      run: |
        echo "---------------------------"
        echo Sanity Test Restore ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{ inputs.kind }}.log
        ./sanity-test
        ./sanity-test restore ${{ inputs.service }}

    - name: Export ${{ inputs.service }} ${{ inputs.kind }}
      if: inputs.with-export == true
@@ -97,6 +106,9 @@ runs:
      shell: bash
      working-directory: src
      run: |
        echo "---------------------------"
        echo Export ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{ inputs.kind }}.log
        ./corso export '${{ inputs.service }}' \
@@ -116,11 +128,14 @@ runs:
        SANITY_TEST_KIND: export
        SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{ inputs.kind }}
        SANITY_TEST_SERVICE: ${{ inputs.service }}
        TEST_DATA: ${{ inputs.test-folder }}
        BASE_BACKUP: ${{ inputs.base-backup }}
        SANITY_TEST_DATA: ${{ inputs.test-folder }}
        SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
      run: |
        echo "---------------------------"
        echo Sanity-Test Export ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{ inputs.kind }}.log
        ./sanity-test
        ./sanity-test export ${{ inputs.service }}

    - name: Export archive ${{ inputs.service }} ${{ inputs.kind }}
      if: inputs.with-export == true
@@ -128,6 +143,9 @@ runs:
      shell: bash
      working-directory: src
      run: |
        echo "---------------------------"
        echo Export Archive ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{ inputs.kind }}.log
        ./corso export '${{ inputs.service }}' \
@@ -150,16 +168,22 @@ runs:
        SANITY_TEST_KIND: export
        SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{ inputs.kind }}-unzipped
        SANITY_TEST_SERVICE: ${{ inputs.service }}
        TEST_DATA: ${{ inputs.test-folder }}
        BASE_BACKUP: ${{ inputs.base-backup }}
        SANITY_TEST_DATA: ${{ inputs.test-folder }}
        SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
      run: |
        echo "---------------------------"
        echo Sanity-Test Export Archive ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{ inputs.kind }}.log
        ./sanity-test
        ./sanity-test export ${{ inputs.service }}

    - name: List ${{ inputs.service }} ${{ inputs.kind }}
      shell: bash
      working-directory: src
      run: |
        echo "---------------------------"
        echo Backup list ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-${{ inputs.service }}-${{ inputs.kind }}.log
        ./corso backup list ${{ inputs.service }} \
@@ -178,6 +202,9 @@ runs:
      shell: bash
      working-directory: src
      run: |
        echo "---------------------------"
        echo Backup List w/ Backup ${{ inputs.service }} ${{ inputs.kind }}
        echo "---------------------------"
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-single-${{ inputs.service }}-${{ inputs.kind }}.log
        ./corso backup list ${{ inputs.service }} \
@@ -193,7 +220,13 @@ runs:
          exit 1
        fi

    # Upload the original go test output as an artifact for later review.
    - if: always()
      shell: bash
      run: |
        echo "---------------------------"
        echo Logging Results
        echo "---------------------------"

    - name: Upload test log
      if: always()
      uses: actions/upload-artifact@v3
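The sanity steps above now invoke the validator as `./sanity-test restore <service>` or `./sanity-test export <service>` instead of a bare `./sanity-test`, while the SANITY_* environment variables keep carrying the folder and backup details. The sanity-test binary itself is not part of this diff; a hypothetical sketch of that kind of verb-plus-service dispatch, for orientation only:

package main

import (
	"fmt"
	"os"
)

// main dispatches on a verb (restore, export) plus a service name,
// mirroring invocations like `./sanity-test restore exchange`.
func main() {
	if len(os.Args) < 3 {
		fmt.Fprintln(os.Stderr, "usage: sanity-test <restore|export> <service>")
		os.Exit(1)
	}

	verb, service := os.Args[1], os.Args[2]
	folder := os.Getenv("SANITY_TEST_FOLDER") // still supplied via env, per the step above

	switch verb {
	case "restore", "export":
		fmt.Printf("validating %s of %s in %s\n", verb, service, folder)
	default:
		fmt.Fprintf(os.Stderr, "unknown kind %q\n", verb)
		os.Exit(1)
	}
}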
4  .github/actions/slack-message/action.yml  (vendored)

@@ -31,7 +31,7 @@ runs:
    - name: use url or blank val
      shell: bash
      run: |
        echo "STEP=${{ github.action || '' }}" >> $GITHUB_ENV
        echo "STEP=${{ env.trimmed_ref || '' }}" >> $GITHUB_ENV
        echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV
        echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV
        echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV
@@ -51,7 +51,7 @@ runs:
            "type": "section",
            "text": {
              "type": "mrkdwn",
              "text": "${{ inputs.msg }} :: ${{ env.JOB }} - ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
              "text": "${{ inputs.msg }}\n${{ env.JOB }} :: ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
            }
          }
        ]
32  .github/workflows/sanity-test.yaml  (vendored)

@@ -181,7 +181,7 @@ jobs:
        uses: ./.github/actions/backup-restore-test
        with:
          service: exchange
          kind: initial
          kind: first-backup
          backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"'
          restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
          test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
@@ -249,7 +249,7 @@ jobs:
        uses: ./.github/actions/backup-restore-test
        with:
          service: onedrive
          kind: initial
          kind: first-backup
          backup-args: '--user "${{ env.TEST_USER }}"'
          restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
          test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
@@ -305,7 +305,7 @@ jobs:
        uses: ./.github/actions/backup-restore-test
        with:
          service: sharepoint
          kind: initial
          kind: first-backup
          backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"'
          restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
          test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
@@ -362,12 +362,34 @@ jobs:
        uses: ./.github/actions/backup-restore-test
        with:
          service: groups
          kind: initial
          kind: first-backup
          backup-args: '--group "${{ vars.CORSO_M365_TEST_TEAM_ID }}"'
          test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
          log-dir: ${{ env.CORSO_LOG_DIR }}

      # TODO: incrementals
      # generate some more entries for incremental check
      # - name: Groups - Create new data (for incremental)
      #   working-directory: ./src/cmd/factory
      #   run: |
      #     go run . sharepoint files \
      #       --site ${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }} \
      #       --user ${{ env.TEST_USER }} \
      #       --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
      #       --tenant ${{ secrets.TENANT_ID }} \
      #       --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }} \
      #       --count 4

      # - name: Groups - Incremental backup
      #   id: groups-incremental
      #   uses: ./.github/actions/backup-restore-test
      #   with:
      #     service: groups
      #     kind: incremental
      #     backup-args: '--site "${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }}"'
      #     restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
      #     test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
      #     log-dir: ${{ env.CORSO_LOG_DIR }}
      #     with-export: true

##########################################################################################################################################
@@ -48,12 +48,12 @@ func AddCommands(cmd *cobra.Command) {

  for _, sc := range subCommandFuncs {
    subCommand := sc()
    flags.AddAllProviderFlags(subCommand)
    flags.AddAllStorageFlags(subCommand)
    backupC.AddCommand(subCommand)

    for _, addBackupTo := range serviceCommands {
      addBackupTo(subCommand)
      sc := addBackupTo(subCommand)
      flags.AddAllProviderFlags(sc)
      flags.AddAllStorageFlags(sc)
    }
  }
}
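The hunk above moves provider and storage flag registration off the shared backup command and onto each service subcommand returned by its constructor. A minimal standalone sketch of the pattern, with a hypothetical addExampleCmd standing in for the per-service constructors in serviceCommands:

package main

import "github.com/spf13/cobra"

// addExampleCmd mirrors a serviceCommands entry: it attaches a child
// command to the parent and returns it so the caller can decorate it.
func addExampleCmd(parent *cobra.Command) *cobra.Command {
	c := &cobra.Command{Use: "example"}
	parent.AddCommand(c)
	return c
}

func main() {
	root := &cobra.Command{Use: "corso"}
	backupC := &cobra.Command{Use: "backup"}
	root.AddCommand(backupC)

	// New pattern: capture the returned subcommand and register flag
	// sets on it directly, rather than only on the shared parent.
	for _, add := range []func(*cobra.Command) *cobra.Command{addExampleCmd} {
		sc := add(backupC)
		sc.Flags().String("provider-flag", "", "stand-in for flags.AddAllProviderFlags")
		sc.Flags().String("storage-flag", "", "stand-in for flags.AddAllStorageFlags")
	}

	_ = root.Execute()
}

Registering the flags on each leaf command keeps them out of `corso backup --help` while still making them available to every `corso backup <verb> <service>` invocation.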
@@ -1,7 +1,6 @@
package backup

import (
  "bytes"
  "fmt"
  "strconv"
  "testing"
@@ -14,6 +13,7 @@ import (

  "github.com/alcionai/corso/src/cli/flags"
  flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
  cliTD "github.com/alcionai/corso/src/cli/testdata"
  "github.com/alcionai/corso/src/cli/utils"
  utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
  "github.com/alcionai/corso/src/internal/tester"
@@ -92,76 +92,46 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
func (suite *ExchangeUnitSuite) TestBackupCreateFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: createCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addExchangeCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    exchangeServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: createCommand},
    addExchangeCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())
    flagsTD.WithFlags(
      exchangeServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
        "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),
        "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
        "--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,

  // Test arg parsing for few args
  args := []string{
    exchangeServiceCommand,
    "--" + flags.RunModeFN, flags.RunModeFlagTest,

    "--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
    "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),

    "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
    "--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,

    // bool flags
    "--" + flags.FailFastFN,
    "--" + flags.DisableIncrementalsFN,
    "--" + flags.ForceItemDataDownloadFN,
    "--" + flags.DisableDeltaFN,
    "--" + flags.EnableImmutableIDFN,
    "--" + flags.DisableConcurrencyLimiterFN,
  }

  args = append(args, flagsTD.PreparedProviderFlags()...)
  args = append(args, flagsTD.PreparedStorageFlags()...)

  cmd.SetArgs(args)

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
        // bool flags
        "--" + flags.FailFastFN,
        "--" + flags.DisableIncrementalsFN,
        "--" + flags.ForceItemDataDownloadFN,
        "--" + flags.DisableDeltaFN,
        "--" + flags.EnableImmutableIDFN,
        "--" + flags.DisableConcurrencyLimiterFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  opts := utils.MakeExchangeOpts(cmd)
  co := utils.Control()

  assert.ElementsMatch(t, flagsTD.MailboxInput, opts.Users)
  // no assertion for category data input

  assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))
  assert.Equal(t, flagsTD.DeltaPageSize, strconv.Itoa(int(co.DeltaPageSize)))

  // bool flags
  assert.Equal(t, control.FailFast, co.FailureHandling)
  assert.True(t, co.ToggleFeatures.DisableIncrementals)
  assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
  assert.True(t, co.ToggleFeatures.DisableDelta)
  assert.True(t, co.ToggleFeatures.ExchangeImmutableIDs)
  assert.True(t, co.ToggleFeatures.DisableConcurrencyLimiter)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -169,36 +139,25 @@ func (suite *ExchangeUnitSuite) TestBackupCreateFlags() {
func (suite *ExchangeUnitSuite) TestBackupListFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: listCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addExchangeCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    exchangeServiceCommand, []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: listCommand},
    addExchangeCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedBackupListFlags(),
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      exchangeServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedBackupListFlags(),
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertBackupListFlags(t, cmd)
  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
@@ -207,41 +166,28 @@ func (suite *ExchangeUnitSuite) TestBackupListFlags() {
func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: detailsCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addExchangeCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    exchangeServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
      "--" + flags.SkipReduceFN,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: detailsCommand},
    addExchangeCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      exchangeServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
        "--" + flags.SkipReduceFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  co := utils.Control()

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  assert.True(t, co.SkipReduce)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -249,36 +195,24 @@ func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() {
func (suite *ExchangeUnitSuite) TestBackupDeleteFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: deleteCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addExchangeCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    exchangeServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: deleteCommand},
    addExchangeCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      exchangeServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
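Every one of these tests now delegates the repetitive command wiring (run-mode flag, service subcommand, flag registration, arg parsing, execute, error check) to a shared cliTD.SetUpCmdHasFlags helper. The helper's implementation is not included in this diff; the following is a plausible reconstruction inferred from the call sites, with the variadic []string signature being an assumption:

// Hypothetical sketch of the helper in src/cli/testdata; names and
// signature are inferred from the call sites above, not from the diff.
package testdata

import (
	"bytes"
	"testing"

	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// UseCobraCommandFn decorates a command, e.g. by registering a flag set.
type UseCobraCommandFn func(*cobra.Command)

// SetUpCmdHasFlags attaches the service command under parent, applies the
// flag registrations, sets the prepared CLI args, executes the command,
// and returns the parent for later option assertions.
func SetUpCmdHasFlags(
	t *testing.T,
	parent *cobra.Command,
	addCommands func(*cobra.Command) *cobra.Command,
	use []UseCobraCommandFn,
	argSets ...[]string, // assumption: e.g. the result of flagsTD.WithFlags
) *cobra.Command {
	child := addCommands(parent)
	require.NotNil(t, child)

	for _, fn := range use {
		fn(child)
	}

	var args []string
	for _, set := range argSets {
		args = append(args, set...)
	}

	parent.SetArgs(args)
	parent.SetOut(new(bytes.Buffer)) // drop output
	parent.SetErr(new(bytes.Buffer)) // drop output

	err := parent.Execute()
	assert.NoError(t, err)

	return parent
}

Centralizing this boilerplate is what lets each test shrink to just its flag inputs and its option assertions, as the hunk line counts (e.g. 76 lines down to 46) show.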
@@ -1,7 +1,6 @@
package backup

import (
  "bytes"
  "strconv"
  "testing"

@@ -13,6 +12,7 @@ import (

  "github.com/alcionai/corso/src/cli/flags"
  flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
  cliTD "github.com/alcionai/corso/src/cli/testdata"
  "github.com/alcionai/corso/src/cli/utils"
  "github.com/alcionai/corso/src/internal/tester"
  "github.com/alcionai/corso/src/pkg/control"
@@ -128,70 +128,38 @@ func (suite *GroupsUnitSuite) TestValidateGroupsBackupCreateFlags() {
func (suite *GroupsUnitSuite) TestBackupCreateFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: createCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addGroupsCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    groupsServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: createCommand},
    addGroupsCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  // Test arg parsing for few args
  args := []string{
    groupsServiceCommand,
    "--" + flags.RunModeFN, flags.RunModeFlagTest,

    "--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
    "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),

    "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,

    // bool flags
    "--" + flags.FailFastFN,
    "--" + flags.DisableIncrementalsFN,
    "--" + flags.ForceItemDataDownloadFN,
    "--" + flags.DisableDeltaFN,
  }

  args = append(args, flagsTD.PreparedProviderFlags()...)
  args = append(args, flagsTD.PreparedStorageFlags()...)

  cmd.SetArgs(args)

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      groupsServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
        "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),
        "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
        "--" + flags.FailFastFN,
        "--" + flags.DisableIncrementalsFN,
        "--" + flags.ForceItemDataDownloadFN,
        "--" + flags.DisableDeltaFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  opts := utils.MakeGroupsOpts(cmd)
  co := utils.Control()

  assert.ElementsMatch(t, flagsTD.GroupsInput, opts.Groups)
  // no assertion for category data input

  assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))

  // bool flags
  assert.Equal(t, control.FailFast, co.FailureHandling)
  assert.True(t, co.ToggleFeatures.DisableIncrementals)
  assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
  assert.True(t, co.ToggleFeatures.DisableDelta)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -199,37 +167,25 @@ func (suite *GroupsUnitSuite) TestBackupCreateFlags() {
func (suite *GroupsUnitSuite) TestBackupListFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: listCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addGroupsCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    groupsServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: listCommand},
    addGroupsCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedBackupListFlags(),
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      groupsServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedBackupListFlags(),
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertBackupListFlags(t, cmd)
  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
@@ -238,41 +194,28 @@ func (suite *GroupsUnitSuite) TestBackupListFlags() {
func (suite *GroupsUnitSuite) TestBackupDetailsFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: detailsCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addGroupsCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    groupsServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
      "--" + flags.SkipReduceFN,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: detailsCommand},
    addGroupsCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      groupsServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
        "--" + flags.SkipReduceFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  co := utils.Control()

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  assert.True(t, co.SkipReduce)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -280,48 +223,24 @@ func (suite *GroupsUnitSuite) TestBackupDetailsFlags() {
func (suite *GroupsUnitSuite) TestBackupDeleteFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: deleteCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addGroupsCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    groupsServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: deleteCommand},
    addGroupsCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  // Test arg parsing for few args
  args := []string{
    groupsServiceCommand,
    "--" + flags.RunModeFN, flags.RunModeFlagTest,
    "--" + flags.BackupFN, flagsTD.BackupInput,
  }

  args = append(args, flagsTD.PreparedProviderFlags()...)
  args = append(args, flagsTD.PreparedStorageFlags()...)

  cmd.SetArgs(args)

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      groupsServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -140,11 +140,9 @@ func prepM365Test(
    recorder = strings.Builder{}
  )

  sc, err := st.StorageConfig()
  cfg, err := st.ToS3Config()
  require.NoError(t, err, clues.ToCore(err))

  cfg := sc.(*storage.S3Config)

  force := map[string]string{
    tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
    tconfig.TestCfgStorageProvider: storage.ProviderS3.String(),
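Several hunks in this commit make the same swap: the generic StorageConfig() accessor plus a caller-side type assertion is replaced by a provider-specific ToS3Config() accessor. A minimal sketch of the shape of that change, using stand-in types (the real definitions live in src/pkg/storage and are not shown in this diff):

package main

import (
	"errors"
	"fmt"
)

// S3Config is a stand-in for storage.S3Config.
type S3Config struct {
	Bucket, Endpoint, Prefix string
}

// Storage is a stand-in for storage.Storage, with both accessor styles.
type Storage struct {
	s3 *S3Config
}

// StorageConfig returns an untyped config; callers must type-assert,
// which panics (or silently misbehaves) if the provider is wrong.
func (s Storage) StorageConfig() (any, error) {
	if s.s3 == nil {
		return nil, errors.New("no storage config")
	}
	return s.s3, nil
}

// ToS3Config returns the typed config directly, moving the provider
// check behind the accessor where it can surface as a normal error.
func (s Storage) ToS3Config() (*S3Config, error) {
	if s.s3 == nil {
		return nil, errors.New("storage is not configured for s3")
	}
	return s.s3, nil
}

func main() {
	st := Storage{s3: &S3Config{Bucket: "bkt"}}

	// old style: generic accessor + assertion at every call site
	sc, _ := st.StorageConfig()
	cfg := sc.(*S3Config)

	// new style: typed accessor, no assertion at the call site
	cfg2, err := st.ToS3Config()
	fmt.Println(cfg.Bucket, cfg2.Bucket, err)
}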
@@ -1,7 +1,6 @@
package backup

import (
  "bytes"
  "fmt"
  "testing"

@@ -13,6 +12,7 @@ import (

  "github.com/alcionai/corso/src/cli/flags"
  flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
  cliTD "github.com/alcionai/corso/src/cli/testdata"
  "github.com/alcionai/corso/src/cli/utils"
  utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
  "github.com/alcionai/corso/src/internal/tester"
@@ -92,48 +92,33 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
func (suite *OneDriveUnitSuite) TestBackupCreateFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: createCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addOneDriveCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    oneDriveServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
      "--" + flags.FailFastFN,
      "--" + flags.DisableIncrementalsFN,
      "--" + flags.ForceItemDataDownloadFN,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: createCommand},
    addOneDriveCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      oneDriveServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
        "--" + flags.FailFastFN,
        "--" + flags.DisableIncrementalsFN,
        "--" + flags.ForceItemDataDownloadFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  opts := utils.MakeOneDriveOpts(cmd)
  co := utils.Control()

  assert.ElementsMatch(t, flagsTD.UsersInput, opts.Users)
  // no assertion for category data input

  // bool flags
  assert.Equal(t, control.FailFast, co.FailureHandling)
  assert.True(t, co.ToggleFeatures.DisableIncrementals)
  assert.True(t, co.ToggleFeatures.ForceItemDataDownload)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -141,37 +126,25 @@ func (suite *OneDriveUnitSuite) TestBackupCreateFlags() {
func (suite *OneDriveUnitSuite) TestBackupListFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: listCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addOneDriveCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    oneDriveServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: listCommand},
    addOneDriveCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedBackupListFlags(),
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      oneDriveServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedBackupListFlags(),
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertBackupListFlags(t, cmd)
  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
@@ -180,41 +153,28 @@ func (suite *OneDriveUnitSuite) TestBackupListFlags() {
func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: detailsCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addOneDriveCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    oneDriveServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
      "--" + flags.SkipReduceFN,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: detailsCommand},
    addOneDriveCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      oneDriveServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
        "--" + flags.SkipReduceFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  co := utils.Control()

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  assert.True(t, co.SkipReduce)

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -222,36 +182,24 @@ func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() {
func (suite *OneDriveUnitSuite) TestBackupDeleteFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: deleteCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addOneDriveCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    oneDriveServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: deleteCommand},
    addOneDriveCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      oneDriveServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -1,7 +1,6 @@
package backup

import (
  "bytes"
  "fmt"
  "strings"
  "testing"
@@ -14,6 +13,7 @@ import (

  "github.com/alcionai/corso/src/cli/flags"
  flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
  cliTD "github.com/alcionai/corso/src/cli/testdata"
  "github.com/alcionai/corso/src/cli/utils"
  utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
  "github.com/alcionai/corso/src/internal/common/idname"
@@ -94,51 +94,36 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
func (suite *SharePointUnitSuite) TestBackupCreateFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: createCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addSharePointCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    sharePointServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput),
      "--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput),
      "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput),
      "--" + flags.FailFastFN,
      "--" + flags.DisableIncrementalsFN,
      "--" + flags.ForceItemDataDownloadFN,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: createCommand},
    addSharePointCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      sharePointServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput),
        "--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput),
        "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput),
        "--" + flags.FailFastFN,
        "--" + flags.DisableIncrementalsFN,
        "--" + flags.ForceItemDataDownloadFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  opts := utils.MakeSharePointOpts(cmd)
  co := utils.Control()

  assert.ElementsMatch(t, []string{strings.Join(flagsTD.SiteIDInput, ",")}, opts.SiteID)
  assert.ElementsMatch(t, flagsTD.WebURLInput, opts.WebURL)
  // no assertion for category data input

  // bool flags
  assert.Equal(t, control.FailFast, co.FailureHandling)
  assert.True(t, co.ToggleFeatures.DisableIncrementals)
  assert.True(t, co.ToggleFeatures.ForceItemDataDownload)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -146,37 +131,25 @@ func (suite *SharePointUnitSuite) TestBackupCreateFlags() {
func (suite *SharePointUnitSuite) TestBackupListFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: listCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addSharePointCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    sharePointServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: listCommand},
    addSharePointCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedBackupListFlags(),
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      sharePointServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedBackupListFlags(),
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertBackupListFlags(t, cmd)
  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
@@ -185,41 +158,28 @@ func (suite *SharePointUnitSuite) TestBackupListFlags() {
func (suite *SharePointUnitSuite) TestBackupDetailsFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: detailsCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addSharePointCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    sharePointServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
      "--" + flags.SkipReduceFN,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: detailsCommand},
    addSharePointCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      sharePointServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
        "--" + flags.SkipReduceFN,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  co := utils.Control()

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  assert.True(t, co.SkipReduce)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -227,36 +187,24 @@ func (suite *SharePointUnitSuite) TestBackupDetailsFlags() {
func (suite *SharePointUnitSuite) TestBackupDeleteFlags() {
  t := suite.T()

  cmd := &cobra.Command{Use: deleteCommand}

  // persistent flags not added by addCommands
  flags.AddRunModeFlag(cmd, true)

  c := addSharePointCommands(cmd)
  require.NotNil(t, c)

  // non-persistent flags not added by addCommands
  flags.AddAllProviderFlags(c)
  flags.AddAllStorageFlags(c)

  flagsTD.WithFlags(
    cmd,
    sharePointServiceCommand,
    []string{
      "--" + flags.RunModeFN, flags.RunModeFlagTest,
      "--" + flags.BackupFN, flagsTD.BackupInput,
  cmd := cliTD.SetUpCmdHasFlags(
    t,
    &cobra.Command{Use: deleteCommand},
    addSharePointCommands,
    []cliTD.UseCobraCommandFn{
      flags.AddAllProviderFlags,
      flags.AddAllStorageFlags,
    },
    flagsTD.PreparedProviderFlags(),
    flagsTD.PreparedStorageFlags())

  cmd.SetOut(new(bytes.Buffer)) // drop output
  cmd.SetErr(new(bytes.Buffer)) // drop output

  err := cmd.Execute()
  assert.NoError(t, err, clues.ToCore(err))
    flagsTD.WithFlags(
      sharePointServiceCommand,
      []string{
        "--" + flags.RunModeFN, flags.RunModeFlagTest,
        "--" + flags.BackupFN, flagsTD.BackupInput,
      },
      flagsTD.PreparedProviderFlags(),
      flagsTD.PreparedStorageFlags()))

  assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

  flagsTD.AssertProviderFlags(t, cmd)
  flagsTD.AssertStorageFlags(t, cmd)
}
@@ -54,7 +54,7 @@ func configureAccount(
  if matchFromConfig {
    providerType := vpr.GetString(account.AccountProviderTypeKey)
    if providerType != account.ProviderM365.String() {
      return acct, clues.New("unsupported account provider: " + providerType)
      return acct, clues.New("unsupported account provider: [" + providerType + "]")
    }

    if err := mustMatchConfig(vpr, m365Overrides(overrides)); err != nil {
@@ -279,8 +279,7 @@ func getStorageAndAccountWithViper(

  // possibly read the prior config from a .corso file
  if readFromFile {
    err = vpr.ReadInConfig()
    if err != nil {
    if err := vpr.ReadInConfig(); err != nil {
      if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
        return config, clues.Wrap(err, "reading corso config file: "+vpr.ConfigFileUsed())
      }
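The refactor above collapses a separate assignment and nil check into Go's scoped-if form, which keeps err from leaking into the enclosing function. A standalone sketch of the same idiom against viper's not-found sentinel type (this sketch uses errors.As where the diff uses a direct type assertion; both work for viper.ConfigFileNotFoundError):

package main

import (
	"errors"
	"fmt"

	"github.com/spf13/viper"
)

func readConfig(vpr *viper.Viper) error {
	// Scoped form: err exists only inside this if block.
	if err := vpr.ReadInConfig(); err != nil {
		// A missing config file is tolerated; any other read error is fatal.
		var notFound viper.ConfigFileNotFoundError
		if !errors.As(err, &notFound) {
			return fmt.Errorf("reading corso config file %s: %w", vpr.ConfigFileUsed(), err)
		}
	}

	return nil
}

func main() {
	vpr := viper.New()
	vpr.SetConfigName(".corso")
	vpr.AddConfigPath(".")
	fmt.Println(readConfig(vpr))
}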
@@ -356,10 +356,9 @@ func (suite *ConfigSuite) TestReadFromFlags() {

  m365Config, _ := repoDetails.Account.M365Config()

  sc, err := repoDetails.Storage.StorageConfig()
  s3Cfg, err := repoDetails.Storage.ToS3Config()
  require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))

  s3Cfg := sc.(*storage.S3Config)
  commonConfig, _ := repoDetails.Storage.CommonConfig()
  pass := commonConfig.Corso.CorsoPassphrase
@@ -425,17 +424,21 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount() {
  err = writeRepoConfigWithViper(vpr, s3Cfg, m365, repository.Options{}, "repoid")
  require.NoError(t, err, "writing repo config", clues.ToCore(err))

  require.Equal(
    t,
    account.ProviderM365.String(),
    vpr.GetString(account.AccountProviderTypeKey),
    "viper should have m365 as the account provider")

  err = vpr.ReadInConfig()
  require.NoError(t, err, "reading repo config", clues.ToCore(err))

  cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, true, true, nil)
  require.NoError(t, err, "getting storage and account from config", clues.ToCore(err))

  sc, err := cfg.Storage.StorageConfig()
  readS3Cfg, err := cfg.Storage.ToS3Config()
  require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))

  readS3Cfg := sc.(*storage.S3Config)

  assert.Equal(t, readS3Cfg.Bucket, s3Cfg.Bucket)
  assert.Equal(t, readS3Cfg.Endpoint, s3Cfg.Endpoint)
  assert.Equal(t, readS3Cfg.Prefix, s3Cfg.Prefix)
@@ -482,11 +485,9 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount_noFileOnlyOverride
  cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, false, true, overrides)
  require.NoError(t, err, "getting storage and account from config", clues.ToCore(err))

  sc, err := cfg.Storage.StorageConfig()
  readS3Cfg, err := cfg.Storage.ToS3Config()
  require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))

  readS3Cfg := sc.(*storage.S3Config)

  assert.Equal(t, readS3Cfg.Bucket, bkt)
  assert.Equal(t, cfg.RepoID, "")
  assert.Equal(t, readS3Cfg.Endpoint, end)
@@ -27,11 +27,11 @@ var exportCommands = []func(cmd *cobra.Command) *cobra.Command{
// AddCommands attaches all `corso export * *` commands to the parent.
func AddCommands(cmd *cobra.Command) {
  subCommand := exportCmd()
  flags.AddAllStorageFlags(subCommand)
  cmd.AddCommand(subCommand)

  for _, addExportTo := range exportCommands {
    addExportTo(subCommand)
    sc := addExportTo(subCommand)
    flags.AddAllStorageFlags(sc)
  }
}
@@ -1,17 +1,15 @@
package export

import (
  "bytes"
  "testing"

  "github.com/alcionai/clues"
  "github.com/spf13/cobra"
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"
  "github.com/stretchr/testify/suite"

  "github.com/alcionai/corso/src/cli/flags"
  flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
  cliTD "github.com/alcionai/corso/src/cli/testdata"
  "github.com/alcionai/corso/src/cli/utils"
  "github.com/alcionai/corso/src/internal/tester"
)
@@ -39,55 +37,41 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
  for _, test := range table {
    suite.Run(test.name, func() {
      t := suite.T()
      parent := &cobra.Command{Use: exportCommand}

      cmd := &cobra.Command{Use: test.use}

      // persistent flags not added by addCommands
      flags.AddRunModeFlag(cmd, true)

      c := addGroupsCommands(cmd)
      require.NotNil(t, c)

      // non-persistent flags not added by addCommands
      flags.AddAllProviderFlags(c)
      flags.AddAllStorageFlags(c)

      cmds := cmd.Commands()
      require.Len(t, cmds, 1)

      child := cmds[0]
      assert.Equal(t, test.expectUse, child.Use)
      assert.Equal(t, test.expectShort, child.Short)
      tester.AreSameFunc(t, test.expectRunE, child.RunE)

      flagsTD.WithFlags(
        cmd,
        groupsServiceCommand,
        []string{
          flagsTD.RestoreDestination,
          "--" + flags.RunModeFN, flags.RunModeFlagTest,
          "--" + flags.BackupFN, flagsTD.BackupInput,

          "--" + flags.FormatFN, flagsTD.FormatType,

          // bool flags
          "--" + flags.ArchiveFN,
      cmd := cliTD.SetUpCmdHasFlags(
        t,
        parent,
        addGroupsCommands,
        []cliTD.UseCobraCommandFn{
          flags.AddAllProviderFlags,
          flags.AddAllStorageFlags,
        },
        flagsTD.PreparedProviderFlags(),
        flagsTD.PreparedStorageFlags())
        flagsTD.WithFlags(
          groupsServiceCommand,
          []string{
            flagsTD.RestoreDestination,
            "--" + flags.RunModeFN, flags.RunModeFlagTest,
            "--" + flags.BackupFN, flagsTD.BackupInput,
            "--" + flags.FormatFN, flagsTD.FormatType,
            "--" + flags.ArchiveFN,
          },
          flagsTD.PreparedProviderFlags(),
          flagsTD.PreparedStorageFlags()))

      cmd.SetOut(new(bytes.Buffer)) // drop output
      cmd.SetErr(new(bytes.Buffer)) // drop output

      err := cmd.Execute()
      assert.NoError(t, err, clues.ToCore(err))
      cliTD.CheckCmdChild(
        t,
        parent,
        3,
        test.expectUse,
        test.expectShort,
        test.expectRunE)

      opts := utils.MakeGroupsOpts(cmd)
      assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

      assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
      assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive)
      assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format)

      flagsTD.AssertStorageFlags(t, cmd)
    })
  }
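Alongside SetUpCmdHasFlags, the export tests swap their hand-rolled child-command checks (Commands(), Len, Use/Short/RunE assertions) for a shared cliTD.CheckCmdChild helper. Its body is not in this diff; a plausible reconstruction inferred from the call sites above, where the literal 3 reads as an expected child count (treat the exact signature as an assumption):

// Hypothetical sketch of the helper in src/cli/testdata.
package testdata

import (
	"testing"

	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/alcionai/corso/src/internal/tester"
)

// CheckCmdChild asserts the parent holds the expected number of children
// and that one of them carries the expected use string, short description,
// and RunE function.
func CheckCmdChild(
	t *testing.T,
	parent *cobra.Command,
	expectChildCount int,
	expectUse, expectShort string,
	expectRunE func(*cobra.Command, []string) error,
) {
	children := parent.Commands()
	require.Len(t, children, expectChildCount)

	for _, child := range children {
		if child.Use != expectUse {
			continue
		}

		assert.Equal(t, expectShort, child.Short)
		tester.AreSameFunc(t, expectRunE, child.RunE)

		return
	}

	t.Errorf("no child command with use %q", expectUse)
}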
@@ -1,17 +1,15 @@
package export

import (
  "bytes"
  "testing"

  "github.com/alcionai/clues"
  "github.com/spf13/cobra"
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"
  "github.com/stretchr/testify/suite"

  "github.com/alcionai/corso/src/cli/flags"
  flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
  cliTD "github.com/alcionai/corso/src/cli/testdata"
  "github.com/alcionai/corso/src/cli/utils"
  "github.com/alcionai/corso/src/internal/tester"
)
@@ -39,67 +37,55 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
  for _, test := range table {
    suite.Run(test.name, func() {
      t := suite.T()
      parent := &cobra.Command{Use: exportCommand}

      cmd := &cobra.Command{Use: test.use}

      // persistent flags not added by addCommands
      flags.AddRunModeFlag(cmd, true)

      c := addOneDriveCommands(cmd)
      require.NotNil(t, c)

      // non-persistent flags not added by addCommands
      flags.AddAllProviderFlags(c)
      flags.AddAllStorageFlags(c)

      cmds := cmd.Commands()
      require.Len(t, cmds, 1)

      child := cmds[0]
      assert.Equal(t, test.expectUse, child.Use)
      assert.Equal(t, test.expectShort, child.Short)
      tester.AreSameFunc(t, test.expectRunE, child.RunE)

      flagsTD.WithFlags(
        cmd,
        oneDriveServiceCommand,
        []string{
          flagsTD.RestoreDestination,
          "--" + flags.RunModeFN, flags.RunModeFlagTest,
          "--" + flags.BackupFN, flagsTD.BackupInput,
          "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
          "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
          "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
          "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
          "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
          "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,

          "--" + flags.FormatFN, flagsTD.FormatType,

          // bool flags
          "--" + flags.ArchiveFN,
      cmd := cliTD.SetUpCmdHasFlags(
        t,
        parent,
        addOneDriveCommands,
        []cliTD.UseCobraCommandFn{
          flags.AddAllProviderFlags,
          flags.AddAllStorageFlags,
        },
        flagsTD.PreparedProviderFlags(),
        flagsTD.PreparedStorageFlags())
        flagsTD.WithFlags(
          oneDriveServiceCommand,
          []string{
            flagsTD.RestoreDestination,
            "--" + flags.RunModeFN, flags.RunModeFlagTest,
            "--" + flags.BackupFN, flagsTD.BackupInput,
            "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
            "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
            "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
            "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
            "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
            "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,

      cmd.SetOut(new(bytes.Buffer)) // drop output
      cmd.SetErr(new(bytes.Buffer)) // drop output
            "--" + flags.FormatFN, flagsTD.FormatType,

      err := cmd.Execute()
      assert.NoError(t, err, clues.ToCore(err))
            // bool flags
            "--" + flags.ArchiveFN,
          },
          flagsTD.PreparedProviderFlags(),
          flagsTD.PreparedStorageFlags()))

      cliTD.CheckCmdChild(
        t,
        parent,
        3,
        test.expectUse,
        test.expectShort,
        test.expectRunE)

      opts := utils.MakeOneDriveOpts(cmd)
      assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

      assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
      assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
      assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
      assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter)
      assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
      assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
      assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)

      assert.Equal(t, flagsTD.CorsoPassphrase, flags.CorsoPassphraseFV)

      flagsTD.AssertStorageFlags(t, cmd)
    })
  }
@ -1,17 +1,15 @@
package export

import (
	"bytes"
	"testing"

	"github.com/alcionai/clues"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/cli/flags"
	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
	cliTD "github.com/alcionai/corso/src/cli/testdata"
	"github.com/alcionai/corso/src/cli/utils"
	"github.com/alcionai/corso/src/internal/tester"
)
@ -39,63 +37,50 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			parent := &cobra.Command{Use: exportCommand}

			cmd := &cobra.Command{Use: test.use}

			// persistent flags not added by addCommands
			flags.AddRunModeFlag(cmd, true)

			c := addSharePointCommands(cmd)
			require.NotNil(t, c)

			// non-persistent flags not added by addCommands
			flags.AddAllProviderFlags(c)
			flags.AddAllStorageFlags(c)

			cmds := cmd.Commands()
			require.Len(t, cmds, 1)

			child := cmds[0]
			assert.Equal(t, test.expectUse, child.Use)
			assert.Equal(t, test.expectShort, child.Short)
			tester.AreSameFunc(t, test.expectRunE, child.RunE)

			flagsTD.WithFlags(
				cmd,
				sharePointServiceCommand,
				[]string{
					flagsTD.RestoreDestination,
					"--" + flags.RunModeFN, flags.RunModeFlagTest,
					"--" + flags.BackupFN, flagsTD.BackupInput,
					"--" + flags.LibraryFN, flagsTD.LibraryInput,
					"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
					"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
					"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
					"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
					"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
					"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
					"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
					"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
					"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
					"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),

					"--" + flags.FormatFN, flagsTD.FormatType,

					// bool flags
					"--" + flags.ArchiveFN,
			cmd := cliTD.SetUpCmdHasFlags(
				t,
				parent,
				addSharePointCommands,
				[]cliTD.UseCobraCommandFn{
					flags.AddAllProviderFlags,
					flags.AddAllStorageFlags,
				},
				flagsTD.PreparedProviderFlags(),
				flagsTD.PreparedStorageFlags())
				flagsTD.WithFlags(
					sharePointServiceCommand,
					[]string{
						flagsTD.RestoreDestination,
						"--" + flags.RunModeFN, flags.RunModeFlagTest,
						"--" + flags.BackupFN, flagsTD.BackupInput,
						"--" + flags.LibraryFN, flagsTD.LibraryInput,
						"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
						"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
						"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
						"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
						"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
						"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
						"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
						"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
						"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
						"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
						"--" + flags.FormatFN, flagsTD.FormatType,
						"--" + flags.ArchiveFN,
					},
					flagsTD.PreparedProviderFlags(),
					flagsTD.PreparedStorageFlags()))

			cmd.SetOut(new(bytes.Buffer)) // drop output
			cmd.SetErr(new(bytes.Buffer)) // drop output

			err := cmd.Execute()
			assert.NoError(t, err, clues.ToCore(err))
			cliTD.CheckCmdChild(
				t,
				parent,
				3,
				test.expectUse,
				test.expectShort,
				test.expectRunE)

			opts := utils.MakeSharePointOpts(cmd)
			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
			assert.Equal(t, flagsTD.LibraryInput, opts.Library)
			assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
			assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@ -103,16 +88,12 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
			assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
			assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
			assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)

			assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem)
			assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder)

			assert.ElementsMatch(t, flagsTD.PageInput, opts.Page)
			assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder)

			assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive)
			assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format)

			flagsTD.AssertStorageFlags(t, cmd)
		})
	}

17
src/cli/flags/testdata/flags.go
vendored
@ -86,7 +86,7 @@ var (
	DisableConcurrencyLimiter = true
)

func WithFlags(
func WithFlags2(
	cc *cobra.Command,
	command string,
	flagSets ...[]string,
@ -99,3 +99,18 @@ func WithFlags(

	cc.SetArgs(args)
}

func WithFlags(
	command string,
	flagSets ...[]string,
) func(*cobra.Command) {
	return func(cc *cobra.Command) {
		args := []string{command}

		for _, sl := range flagSets {
			args = append(args, sl...)
		}

		cc.SetArgs(args)
	}
}

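The closure-returning WithFlags defers cobra's SetArgs until the caller decides which command receives the arguments, which is what lets the new test harness wire the parent command after all children and flags are registered. A minimal sketch of the call shape, with a hypothetical command name and flag pair standing in for real Corso flags:

	// setArgs captures the args now, and applies them to a command later.
	setArgs := flagsTD.WithFlags(
		"backup",                        // hypothetical child command name
		[]string{"--example-flag", "x"}, // hypothetical flag pair
		flagsTD.PreparedStorageFlags())

	setArgs(rootCmd) // rootCmd is a stand-in; equivalent to rootCmd.SetArgs(...)
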
@ -96,13 +96,11 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
		cfg.Account.ID(),
		opt)

	sc, err := cfg.Storage.StorageConfig()
	storageCfg, err := cfg.Storage.ToFilesystemConfig()
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))
	}

	storageCfg := sc.(*storage.FilesystemConfig)

	m365, err := cfg.Account.M365Config()
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@ -123,14 +121,20 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
			return nil
		}

		return Only(ctx, clues.Wrap(err, "Failed to initialize a new filesystem repository"))
		return Only(ctx, clues.Stack(ErrInitializingRepo, err))
	}

	defer utils.CloseRepo(ctx, r)

	Infof(ctx, "Initialized a repository at path %s", storageCfg.Path)

	if err = config.WriteRepoConfig(ctx, sc, m365, opt.Repo, r.GetID()); err != nil {
	err = config.WriteRepoConfig(
		ctx,
		storageCfg,
		m365,
		opt.Repo,
		r.GetID())
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to write repository configuration"))
	}

@ -181,13 +185,11 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
		repoID = events.RepoIDNotFound
	}

	sc, err := cfg.Storage.StorageConfig()
	storageCfg, err := cfg.Storage.ToFilesystemConfig()
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))
	}

	storageCfg := sc.(*storage.FilesystemConfig)

	m365, err := cfg.Account.M365Config()
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@ -206,14 +208,20 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
	}

	if err := r.Connect(ctx); err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to connect to the filesystem repository"))
		return Only(ctx, clues.Stack(ErrConnectingRepo, err))
	}

	defer utils.CloseRepo(ctx, r)

	Infof(ctx, "Connected to repository at path %s", storageCfg.Path)

	if err = config.WriteRepoConfig(ctx, sc, m365, opts.Repo, r.GetID()); err != nil {
	err = config.WriteRepoConfig(
		ctx,
		storageCfg,
		m365,
		opts.Repo,
		r.GetID())
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to write repository configuration"))
	}

@ -56,9 +56,8 @@ func (suite *FilesystemE2ESuite) TestInitFilesystemCmd() {

	st := storeTD.NewFilesystemStorage(t)

	sc, err := st.StorageConfig()
	cfg, err := st.ToFilesystemConfig()
	require.NoError(t, err, clues.ToCore(err))
	cfg := sc.(*storage.FilesystemConfig)

	force := map[string]string{
		tconfig.TestCfgStorageProvider: storage.ProviderFilesystem.String(),
@ -113,9 +112,8 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() {
	defer flush()

	st := storeTD.NewFilesystemStorage(t)
	sc, err := st.StorageConfig()
	cfg, err := st.ToFilesystemConfig()
	require.NoError(t, err, clues.ToCore(err))
	cfg := sc.(*storage.FilesystemConfig)

	force := map[string]string{
		tconfig.TestCfgAccountProvider: account.ProviderM365.String(),

@ -21,6 +21,11 @@ const (
	maintenanceCommand = "maintenance"
)

var (
	ErrConnectingRepo   = clues.New("connecting repository")
	ErrInitializingRepo = clues.New("initializing repository")
)

var repoCommands = []func(cmd *cobra.Command) *cobra.Command{
	addS3Commands,
	addFilesystemCommands,

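Promoting these messages to package-level sentinels lets callers branch on the failure class: clues.Stack keeps the sentinel in the error chain, so errors.Is still matches after wrapping. A small sketch, assuming the standard errors package:

	stacked := clues.Stack(ErrConnectingRepo, sourceErr) // sourceErr is illustrative

	if errors.Is(stacked, ErrConnectingRepo) {
		// handle connect failures distinctly from init failures
	}
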
@ -116,13 +116,11 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
		cfg.Account.ID(),
		opt)

	sc, err := cfg.Storage.StorageConfig()
	s3Cfg, err := cfg.Storage.ToS3Config()
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))
	}

	s3Cfg := sc.(*storage.S3Config)

	if strings.HasPrefix(s3Cfg.Endpoint, "http://") || strings.HasPrefix(s3Cfg.Endpoint, "https://") {
		invalidEndpointErr := "endpoint doesn't support specifying protocol. " +
			"pass --disable-tls flag to use http:// instead of default https://"
@ -150,7 +148,7 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
			return nil
		}

		return Only(ctx, clues.Wrap(err, "Failed to initialize a new S3 repository"))
		return Only(ctx, clues.Stack(ErrInitializingRepo, err))
	}

	defer utils.CloseRepo(ctx, r)
@ -199,13 +197,11 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
		repoID = events.RepoIDNotFound
	}

	sc, err := cfg.Storage.StorageConfig()
	s3Cfg, err := cfg.Storage.ToS3Config()
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))
	}

	s3Cfg := sc.(*storage.S3Config)

	m365, err := cfg.Account.M365Config()
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@ -231,7 +227,7 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
	}

	if err := r.Connect(ctx); err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to connect to the S3 repository"))
		return Only(ctx, clues.Stack(ErrConnectingRepo, err))
	}

	defer utils.CloseRepo(ctx, r)

@ -8,10 +8,12 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	"golang.org/x/exp/maps"

	"github.com/alcionai/corso/src/cli"
	"github.com/alcionai/corso/src/cli/config"
	cliTD "github.com/alcionai/corso/src/cli/testdata"
	"github.com/alcionai/corso/src/internal/common/str"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/tester/tconfig"
	"github.com/alcionai/corso/src/pkg/account"
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
|
||||
sc, err := st.StorageConfig()
|
||||
cfg, err := st.ToS3Config()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
cfg := sc.(*storage.S3Config)
|
||||
|
||||
vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
|
||||
if !test.hasConfigFile {
|
||||
@ -102,10 +103,9 @@ func (suite *S3E2ESuite) TestInitMultipleTimes() {
|
||||
defer flush()
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
sc, err := st.StorageConfig()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
cfg := sc.(*storage.S3Config)
|
||||
cfg, err := st.ToS3Config()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
|
||||
|
||||
@ -134,11 +134,9 @@ func (suite *S3E2ESuite) TestInitS3Cmd_missingBucket() {
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
|
||||
sc, err := st.StorageConfig()
|
||||
cfg, err := st.ToS3Config()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
cfg := sc.(*storage.S3Config)
|
||||
|
||||
force := map[string]string{
|
||||
tconfig.TestCfgBucket: "",
|
||||
}
|
||||
@ -189,9 +187,9 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
|
||||
defer flush()
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
sc, err := st.StorageConfig()
|
||||
|
||||
cfg, err := st.ToS3Config()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
cfg := sc.(*storage.S3Config)
|
||||
|
||||
force := map[string]string{
|
||||
tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
|
||||
@ -234,60 +232,65 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
	}
}

func (suite *S3E2ESuite) TestConnectS3Cmd_BadBucket() {
	t := suite.T()
	ctx, flush := tester.NewContext(t)
func (suite *S3E2ESuite) TestConnectS3Cmd_badInputs() {
	table := []struct {
		name      string
		bucket    string
		prefix    string
		expectErr func(t *testing.T, err error)
	}{
		{
			name:   "bucket",
			bucket: "wrong",
			expectErr: func(t *testing.T, err error) {
				assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err))
			},
		},
		{
			name:   "prefix",
			prefix: "wrong",
			expectErr: func(t *testing.T, err error) {
				assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err))
			},
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

	defer flush()
			ctx, flush := tester.NewContext(t)
			defer flush()

	st := storeTD.NewPrefixedS3Storage(t)
	sc, err := st.StorageConfig()
	require.NoError(t, err, clues.ToCore(err))
			st := storeTD.NewPrefixedS3Storage(t)
			cfg, err := st.ToS3Config()
			require.NoError(t, err, clues.ToCore(err))

	cfg := sc.(*storage.S3Config)
			bucket := str.First(test.bucket, cfg.Bucket)
			prefix := str.First(test.prefix, cfg.Prefix)

	vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
			over := map[string]string{}
			acct := tconfig.NewM365Account(t)

	ctx = config.SetViper(ctx, vpr)
			maps.Copy(over, acct.Config)
			over[account.AccountProviderTypeKey] = account.ProviderM365.String()
			over[storage.StorageProviderTypeKey] = storage.ProviderS3.String()

	cmd := cliTD.StubRootCmd(
		"repo", "connect", "s3",
		"--config-file", configFP,
		"--bucket", "wrong",
		"--prefix", cfg.Prefix)
	cli.BuildCommandTree(cmd)
			vpr, configFP := tconfig.MakeTempTestConfigClone(t, over)
			ctx = config.SetViper(ctx, vpr)

	// run the command
	err = cmd.ExecuteContext(ctx)
	require.Error(t, err, clues.ToCore(err))
}
			cmd := cliTD.StubRootCmd(
				"repo", "connect", "s3",
				"--config-file", configFP,
				"--bucket", bucket,
				"--prefix", prefix)
			cli.BuildCommandTree(cmd)

func (suite *S3E2ESuite) TestConnectS3Cmd_BadPrefix() {
	t := suite.T()
	ctx, flush := tester.NewContext(t)

	defer flush()

	st := storeTD.NewPrefixedS3Storage(t)
	sc, err := st.StorageConfig()
	require.NoError(t, err, clues.ToCore(err))

	cfg := sc.(*storage.S3Config)

	vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)

	ctx = config.SetViper(ctx, vpr)

	cmd := cliTD.StubRootCmd(
		"repo", "connect", "s3",
		"--config-file", configFP,
		"--bucket", cfg.Bucket,
		"--prefix", "wrong")
	cli.BuildCommandTree(cmd)

	// run the command
	err = cmd.ExecuteContext(ctx)
	require.Error(t, err, clues.ToCore(err))
			// run the command
			err = cmd.ExecuteContext(ctx)
			require.Error(t, err, clues.ToCore(err))
			test.expectErr(t, err)
		})
	}
}

func (suite *S3E2ESuite) TestUpdateS3Cmd() {

@ -66,11 +66,9 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
	suite.acct = tconfig.NewM365Account(t)
	suite.st = storeTD.NewPrefixedS3Storage(t)

	sc, err := suite.st.StorageConfig()
	cfg, err := suite.st.ToS3Config()
	require.NoError(t, err, clues.ToCore(err))

	cfg := sc.(*storage.S3Config)

	force := map[string]string{
		tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
		tconfig.TestCfgStorageProvider: storage.ProviderS3.String(),

@ -1,17 +1,15 @@
package restore

import (
	"bytes"
	"testing"

	"github.com/alcionai/clues"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/cli/flags"
	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
	cliTD "github.com/alcionai/corso/src/cli/testdata"
	"github.com/alcionai/corso/src/cli/utils"
	"github.com/alcionai/corso/src/internal/tester"
)
@ -39,80 +37,64 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			parent := &cobra.Command{Use: restoreCommand}

			cmd := &cobra.Command{Use: test.use}

			// persistent flags not added by addCommands
			flags.AddRunModeFlag(cmd, true)

			c := addExchangeCommands(cmd)
			require.NotNil(t, c)

			// non-persistent flags not added by addCommands
			flags.AddAllProviderFlags(c)
			flags.AddAllStorageFlags(c)

			cmds := cmd.Commands()
			require.Len(t, cmds, 1)

			child := cmds[0]
			assert.Equal(t, test.expectUse, child.Use)
			assert.Equal(t, test.expectShort, child.Short)
			tester.AreSameFunc(t, test.expectRunE, child.RunE)

			flagsTD.WithFlags(
				cmd,
				exchangeServiceCommand,
				[]string{
					"--" + flags.RunModeFN, flags.RunModeFlagTest,
					"--" + flags.BackupFN, flagsTD.BackupInput,

					"--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput),
					"--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput),
					"--" + flags.ContactNameFN, flagsTD.ContactNameInput,

					"--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput),
					"--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput),
					"--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput,
					"--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput,
					"--" + flags.EmailSenderFN, flagsTD.EmailSenderInput,
					"--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput,

					"--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput),
					"--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput),
					"--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput,
					"--" + flags.EventRecursFN, flagsTD.EventRecursInput,
					"--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput,
					"--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput,
					"--" + flags.EventSubjectFN, flagsTD.EventSubjectInput,

					"--" + flags.CollisionsFN, flagsTD.Collisions,
					"--" + flags.DestinationFN, flagsTD.Destination,
					"--" + flags.ToResourceFN, flagsTD.ToResource,
			cmd := cliTD.SetUpCmdHasFlags(
				t,
				parent,
				addExchangeCommands,
				[]cliTD.UseCobraCommandFn{
					flags.AddAllProviderFlags,
					flags.AddAllStorageFlags,
				},
				flagsTD.PreparedProviderFlags(),
				flagsTD.PreparedStorageFlags())
				flagsTD.WithFlags(
					exchangeServiceCommand,
					[]string{
						"--" + flags.RunModeFN, flags.RunModeFlagTest,
						"--" + flags.BackupFN, flagsTD.BackupInput,
						"--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput),
						"--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput),
						"--" + flags.ContactNameFN, flagsTD.ContactNameInput,
						"--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput),
						"--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput),
						"--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput,
						"--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput,
						"--" + flags.EmailSenderFN, flagsTD.EmailSenderInput,
						"--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput,
						"--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput),
						"--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput),
						"--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput,
						"--" + flags.EventRecursFN, flagsTD.EventRecursInput,
						"--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput,
						"--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput,
						"--" + flags.EventSubjectFN, flagsTD.EventSubjectInput,
						"--" + flags.CollisionsFN, flagsTD.Collisions,
						"--" + flags.DestinationFN, flagsTD.Destination,
						"--" + flags.ToResourceFN, flagsTD.ToResource,
					},
					flagsTD.PreparedProviderFlags(),
					flagsTD.PreparedStorageFlags()))

			cmd.SetOut(new(bytes.Buffer)) // drop output
			cmd.SetErr(new(bytes.Buffer)) // drop output

			err := cmd.Execute()
			assert.NoError(t, err, clues.ToCore(err))
			cliTD.CheckCmdChild(
				t,
				parent,
				3,
				test.expectUse,
				test.expectShort,
				test.expectRunE)

			opts := utils.MakeExchangeOpts(cmd)
			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
			assert.ElementsMatch(t, flagsTD.ContactInput, opts.Contact)
			assert.ElementsMatch(t, flagsTD.ContactFldInput, opts.ContactFolder)
			assert.Equal(t, flagsTD.ContactNameInput, opts.ContactName)

			assert.ElementsMatch(t, flagsTD.EmailInput, opts.Email)
			assert.ElementsMatch(t, flagsTD.EmailFldInput, opts.EmailFolder)
			assert.Equal(t, flagsTD.EmailReceivedAfterInput, opts.EmailReceivedAfter)
			assert.Equal(t, flagsTD.EmailReceivedBeforeInput, opts.EmailReceivedBefore)
			assert.Equal(t, flagsTD.EmailSenderInput, opts.EmailSender)
			assert.Equal(t, flagsTD.EmailSubjectInput, opts.EmailSubject)

			assert.ElementsMatch(t, flagsTD.EventInput, opts.Event)
			assert.ElementsMatch(t, flagsTD.EventCalInput, opts.EventCalendar)
			assert.Equal(t, flagsTD.EventOrganizerInput, opts.EventOrganizer)
@ -120,11 +102,9 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
			assert.Equal(t, flagsTD.EventStartsAfterInput, opts.EventStartsAfter)
			assert.Equal(t, flagsTD.EventStartsBeforeInput, opts.EventStartsBefore)
			assert.Equal(t, flagsTD.EventSubjectInput, opts.EventSubject)

			assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
			assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
			assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)

			flagsTD.AssertProviderFlags(t, cmd)
			flagsTD.AssertStorageFlags(t, cmd)
		})

@ -1,17 +1,15 @@
package restore

import (
	"bytes"
	"testing"

	"github.com/alcionai/clues"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/cli/flags"
	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
	cliTD "github.com/alcionai/corso/src/cli/testdata"
	"github.com/alcionai/corso/src/cli/utils"
	"github.com/alcionai/corso/src/internal/tester"
)
@ -39,65 +37,51 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			parent := &cobra.Command{Use: restoreCommand}

			cmd := &cobra.Command{Use: test.use}

			// persistent flags not added by addCommands
			flags.AddRunModeFlag(cmd, true)

			c := addGroupsCommands(cmd)
			require.NotNil(t, c)

			// non-persistent flags not added by addCommands
			flags.AddAllProviderFlags(c)
			flags.AddAllStorageFlags(c)

			cmds := cmd.Commands()
			require.Len(t, cmds, 1)

			child := cmds[0]
			assert.Equal(t, test.expectUse, child.Use)
			assert.Equal(t, test.expectShort, child.Short)
			tester.AreSameFunc(t, test.expectRunE, child.RunE)

			flagsTD.WithFlags(
				cmd,
				groupsServiceCommand,
				[]string{
					"--" + flags.RunModeFN, flags.RunModeFlagTest,
					"--" + flags.BackupFN, flagsTD.BackupInput,

					"--" + flags.LibraryFN, flagsTD.LibraryInput,
					"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
					"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
					"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
					"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
					"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
					"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
					"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
					"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
					"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
					"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),

					"--" + flags.CollisionsFN, flagsTD.Collisions,
					"--" + flags.DestinationFN, flagsTD.Destination,
					"--" + flags.ToResourceFN, flagsTD.ToResource,

					// bool flags
					"--" + flags.NoPermissionsFN,
			cmd := cliTD.SetUpCmdHasFlags(
				t,
				parent,
				addGroupsCommands,
				[]cliTD.UseCobraCommandFn{
					flags.AddAllProviderFlags,
					flags.AddAllStorageFlags,
				},
				flagsTD.PreparedProviderFlags(),
				flagsTD.PreparedStorageFlags())
				flagsTD.WithFlags(
					groupsServiceCommand,
					[]string{
						"--" + flags.RunModeFN, flags.RunModeFlagTest,
						"--" + flags.BackupFN, flagsTD.BackupInput,
						"--" + flags.LibraryFN, flagsTD.LibraryInput,
						"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
						"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
						"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
						"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
						"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
						"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
						"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
						"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
						"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
						"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
						"--" + flags.CollisionsFN, flagsTD.Collisions,
						"--" + flags.DestinationFN, flagsTD.Destination,
						"--" + flags.ToResourceFN, flagsTD.ToResource,
						"--" + flags.NoPermissionsFN,
					},
					flagsTD.PreparedProviderFlags(),
					flagsTD.PreparedStorageFlags()))

			cmd.SetOut(new(bytes.Buffer)) // drop output
			cmd.SetErr(new(bytes.Buffer)) // drop output

			err := cmd.Execute()
			assert.NoError(t, err, clues.ToCore(err))
			cliTD.CheckCmdChild(
				t,
				parent,
				3,
				test.expectUse,
				test.expectShort,
				test.expectRunE)

			opts := utils.MakeGroupsOpts(cmd)
			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
			assert.Equal(t, flagsTD.LibraryInput, opts.Library)
			assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
			assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@ -105,14 +89,10 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
			assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
			assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
			assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)

			assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
			assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
			assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)

			// bool flags
			assert.True(t, flags.NoPermissionsFV)

			flagsTD.AssertProviderFlags(t, cmd)
			flagsTD.AssertStorageFlags(t, cmd)
		})

@ -1,17 +1,15 @@
package restore

import (
	"bytes"
	"testing"

	"github.com/alcionai/clues"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/cli/flags"
	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
	cliTD "github.com/alcionai/corso/src/cli/testdata"
	"github.com/alcionai/corso/src/cli/utils"
	"github.com/alcionai/corso/src/internal/tester"
)
@ -39,73 +37,56 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			parent := &cobra.Command{Use: restoreCommand}

			cmd := &cobra.Command{Use: test.use}

			// persistent flags not added by addCommands
			flags.AddRunModeFlag(cmd, true)

			c := addOneDriveCommands(cmd)
			require.NotNil(t, c)

			// non-persistent flags not added by addCommands
			flags.AddAllProviderFlags(c)
			flags.AddAllStorageFlags(c)

			cmds := cmd.Commands()
			require.Len(t, cmds, 1)

			child := cmds[0]
			assert.Equal(t, test.expectUse, child.Use)
			assert.Equal(t, test.expectShort, child.Short)
			tester.AreSameFunc(t, test.expectRunE, child.RunE)

			flagsTD.WithFlags(
				cmd,
				oneDriveServiceCommand,
				[]string{
					"--" + flags.RunModeFN, flags.RunModeFlagTest,
					"--" + flags.BackupFN, flagsTD.BackupInput,
					"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
					"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
					"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
					"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
					"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
					"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,

					"--" + flags.CollisionsFN, flagsTD.Collisions,
					"--" + flags.DestinationFN, flagsTD.Destination,
					"--" + flags.ToResourceFN, flagsTD.ToResource,

					// bool flags
					"--" + flags.NoPermissionsFN,
			cmd := cliTD.SetUpCmdHasFlags(
				t,
				parent,
				addOneDriveCommands,
				[]cliTD.UseCobraCommandFn{
					flags.AddAllProviderFlags,
					flags.AddAllStorageFlags,
				},
				flagsTD.PreparedProviderFlags(),
				flagsTD.PreparedStorageFlags())
				flagsTD.WithFlags(
					oneDriveServiceCommand,
					[]string{
						"--" + flags.RunModeFN, flags.RunModeFlagTest,
						"--" + flags.BackupFN, flagsTD.BackupInput,
						"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
						"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
						"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
						"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
						"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
						"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
						"--" + flags.CollisionsFN, flagsTD.Collisions,
						"--" + flags.DestinationFN, flagsTD.Destination,
						"--" + flags.ToResourceFN, flagsTD.ToResource,
						"--" + flags.NoPermissionsFN,
					},
					flagsTD.PreparedProviderFlags(),
					flagsTD.PreparedStorageFlags()))

			cmd.SetOut(new(bytes.Buffer)) // drop output
			cmd.SetErr(new(bytes.Buffer)) // drop output

			err := cmd.Execute()
			assert.NoError(t, err, clues.ToCore(err))
			cliTD.CheckCmdChild(
				t,
				parent,
				3,
				test.expectUse,
				test.expectShort,
				test.expectRunE)

			opts := utils.MakeOneDriveOpts(cmd)
			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
			assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
			assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
			assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter)
			assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
			assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
			assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)

			assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
			assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
			assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)

			// bool flags
			assert.True(t, flags.NoPermissionsFV)

			flagsTD.AssertProviderFlags(t, cmd)
			flagsTD.AssertStorageFlags(t, cmd)
		})

@ -25,12 +25,12 @@ var restoreCommands = []func(cmd *cobra.Command) *cobra.Command{
// AddCommands attaches all `corso restore * *` commands to the parent.
func AddCommands(cmd *cobra.Command) {
	subCommand := restoreCmd()
	flags.AddAllProviderFlags(subCommand)
	flags.AddAllStorageFlags(subCommand)
	cmd.AddCommand(subCommand)

	for _, addRestoreTo := range restoreCommands {
		addRestoreTo(subCommand)
		sc := addRestoreTo(subCommand)
		flags.AddAllProviderFlags(sc)
		flags.AddAllStorageFlags(sc)
	}
}

@ -1,17 +1,15 @@
package restore

import (
	"bytes"
	"testing"

	"github.com/alcionai/clues"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/cli/flags"
	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
	cliTD "github.com/alcionai/corso/src/cli/testdata"
	"github.com/alcionai/corso/src/cli/utils"
	"github.com/alcionai/corso/src/internal/tester"
)
@ -39,64 +37,51 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			parent := &cobra.Command{Use: restoreCommand}

			cmd := &cobra.Command{Use: test.use}

			// persistent flags not added by addCommands
			flags.AddRunModeFlag(cmd, true)

			c := addSharePointCommands(cmd)
			require.NotNil(t, c)

			// non-persistent flags not added by addCommands
			flags.AddAllProviderFlags(c)
			flags.AddAllStorageFlags(c)

			cmds := cmd.Commands()
			require.Len(t, cmds, 1)

			child := cmds[0]
			assert.Equal(t, test.expectUse, child.Use)
			assert.Equal(t, test.expectShort, child.Short)
			tester.AreSameFunc(t, test.expectRunE, child.RunE)

			flagsTD.WithFlags(
				cmd,
				sharePointServiceCommand,
				[]string{
					"--" + flags.RunModeFN, flags.RunModeFlagTest,
					"--" + flags.BackupFN, flagsTD.BackupInput,
					"--" + flags.LibraryFN, flagsTD.LibraryInput,
					"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
					"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
					"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
					"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
					"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
					"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
					"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
					"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
					"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
					"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),

					"--" + flags.CollisionsFN, flagsTD.Collisions,
					"--" + flags.DestinationFN, flagsTD.Destination,
					"--" + flags.ToResourceFN, flagsTD.ToResource,

					// bool flags
					"--" + flags.NoPermissionsFN,
			cmd := cliTD.SetUpCmdHasFlags(
				t,
				parent,
				addSharePointCommands,
				[]cliTD.UseCobraCommandFn{
					flags.AddAllProviderFlags,
					flags.AddAllStorageFlags,
				},
				flagsTD.PreparedProviderFlags(),
				flagsTD.PreparedStorageFlags())
				flagsTD.WithFlags(
					sharePointServiceCommand,
					[]string{
						"--" + flags.RunModeFN, flags.RunModeFlagTest,
						"--" + flags.BackupFN, flagsTD.BackupInput,
						"--" + flags.LibraryFN, flagsTD.LibraryInput,
						"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
						"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
						"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
						"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
						"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
						"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
						"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
						"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
						"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
						"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
						"--" + flags.CollisionsFN, flagsTD.Collisions,
						"--" + flags.DestinationFN, flagsTD.Destination,
						"--" + flags.ToResourceFN, flagsTD.ToResource,
						"--" + flags.NoPermissionsFN,
					},
					flagsTD.PreparedProviderFlags(),
					flagsTD.PreparedStorageFlags()))

			cmd.SetOut(new(bytes.Buffer)) // drop output
			cmd.SetErr(new(bytes.Buffer)) // drop output

			err := cmd.Execute()
			assert.NoError(t, err, clues.ToCore(err))
			cliTD.CheckCmdChild(
				t,
				parent,
				3,
				test.expectUse,
				test.expectShort,
				test.expectRunE)

			opts := utils.MakeSharePointOpts(cmd)
			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
			assert.Equal(t, flagsTD.LibraryInput, opts.Library)
			assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
			assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@ -104,20 +89,14 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
			assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
			assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
			assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)

			assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem)
			assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder)

			assert.ElementsMatch(t, flagsTD.PageInput, opts.Page)
			assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder)

			assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
			assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
			assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)

			// bool flags
			assert.True(t, flags.NoPermissionsFV)

			flagsTD.AssertProviderFlags(t, cmd)
			flagsTD.AssertStorageFlags(t, cmd)
		})

88
src/cli/testdata/cli.go
vendored
@ -1,11 +1,20 @@
package testdata

import (
	"bytes"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/alcionai/clues"
	"github.com/google/uuid"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/alcionai/corso/src/cli/flags"
	"github.com/alcionai/corso/src/internal/tester"
)

// StubRootCmd builds a stub cobra command to be used as
@ -27,3 +36,82 @@ func StubRootCmd(args ...string) *cobra.Command {

	return c
}

type UseCobraCommandFn func(*cobra.Command)

func SetUpCmdHasFlags(
	t *testing.T,
	parentCmd *cobra.Command,
	addChildCommand func(*cobra.Command) *cobra.Command,
	addFlags []UseCobraCommandFn,
	setArgs UseCobraCommandFn,
) *cobra.Command {
	parentCmd.PersistentPreRun = func(c *cobra.Command, args []string) {
		t.Log("testing args:")

		for _, arg := range args {
			t.Log(arg)
		}
	}

	// persistent flags not added by addCommands
	flags.AddRunModeFlag(parentCmd, true)

	cmd := addChildCommand(parentCmd)
	require.NotNil(t, cmd)

	cul := cmd.UseLine()
	require.Truef(
		t,
		strings.HasPrefix(cul, parentCmd.Use+" "+cmd.Use),
		"child command has expected usage format 'parent child', got %q",
		cul)

	for _, af := range addFlags {
		af(cmd)
	}

	setArgs(parentCmd)

	parentCmd.SetOut(new(bytes.Buffer)) // drop output
	parentCmd.SetErr(new(bytes.Buffer)) // drop output

	err := parentCmd.Execute()
	assert.NoError(t, err, clues.ToCore(err))

	return cmd
}

type CobraRunEFn func(cmd *cobra.Command, args []string) error

func CheckCmdChild(
	t *testing.T,
	cmd *cobra.Command,
	expectChildCount int,
	expectUse string,
	expectShort string,
	expectRunE CobraRunEFn,
) {
	var (
		cmds  = cmd.Commands()
		child *cobra.Command
	)

	for _, cc := range cmds {
		if cc.Use == expectUse {
			child = cc
			break
		}
	}

	require.Len(
		t,
		cmds,
		expectChildCount,
		"parent command should have the correct child command count")

	require.NotNil(t, child, "should have found expected child command")

	assert.Equal(t, expectShort, child.Short)
	tester.AreSameFunc(t, expectRunE, child.RunE)
}

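Together the two helpers reduce each service test to: build the child under a stub parent, execute it with prepared args, then verify the wiring. A condensed sketch of that flow, where addFooCommands and fooServiceCommand are hypothetical stand-ins for any of the service packages above:

	parent := &cobra.Command{Use: "restore"}

	cmd := SetUpCmdHasFlags(
		t,
		parent,
		addFooCommands, // hypothetical func(*cobra.Command) *cobra.Command
		[]UseCobraCommandFn{flags.AddAllProviderFlags, flags.AddAllStorageFlags},
		flagsTD.WithFlags(fooServiceCommand, []string{"--" + flags.RunModeFN, flags.RunModeFlagTest}))

	CheckCmdChild(t, parent, 1, expectUse, expectShort, expectRunE) // expect* are placeholders
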
@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) {
    }
}
else {
    Write-Host "User (for OneDrvie) or Site (for Sharpeoint) is required"
    Write-Host "User (for OneDrive) or Site (for Sharepoint) is required"
    Exit
}

@ -197,13 +197,11 @@ func handleCheckerCommand(cmd *cobra.Command, args []string, f flags) error {
		return clues.Wrap(err, "getting storage config")
	}

	sc, err := repoDetails.Storage.StorageConfig()
	cfg, err := repoDetails.Storage.ToS3Config()
	if err != nil {
		return clues.Wrap(err, "getting S3 config")
	}

	cfg := sc.(*storage.S3Config)

	endpoint := defaultS3Endpoint
	if len(cfg.Endpoint) > 0 {
		endpoint = cfg.Endpoint

@ -1,6 +1,68 @@
package common

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/alcionai/corso/src/internal/tester/tconfig"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/credentials"
	"github.com/alcionai/corso/src/pkg/logger"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type PermissionInfo struct {
	EntityID string
	Roles    []string
}

const (
	sanityBaseBackup  = "SANITY_BASE_BACKUP"
	sanityTestData    = "SANITY_TEST_DATA"
	sanityTestFolder  = "SANITY_TEST_FOLDER"
	sanityTestService = "SANITY_TEST_SERVICE"
)

type Envs struct {
	BaseBackupFolder string
	DataFolder       string
	FolderName       string
	Service          string
	SiteID           string
	StartTime        time.Time
	UserID           string
}

func EnvVars(ctx context.Context) Envs {
	folder := strings.TrimSpace(os.Getenv(sanityTestFolder))
	startTime, _ := MustGetTimeFromName(ctx, folder)

	e := Envs{
		BaseBackupFolder: os.Getenv(sanityBaseBackup),
		DataFolder:       os.Getenv(sanityTestData),
		FolderName:       folder,
		SiteID:           tconfig.GetM365SiteID(ctx),
		Service:          os.Getenv(sanityTestService),
		StartTime:        startTime,
		UserID:           tconfig.GetM365UserID(ctx),
	}

	fmt.Printf("\n-----\nenvs %+v\n-----\n", e)

	logger.Ctx(ctx).Info("envs", e)

	return e
}

func GetAC() (api.Client, error) {
	creds := account.M365Config{
		M365:          credentials.GetM365(),
		AzureTenantID: os.Getenv(account.AzureTenantID),
	}

	return api.NewClient(creds, control.DefaultOptions())
}

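EnvVars is the contract between the test runner and the sanity binary: the four SANITY_* variables pick the service, the freshly restored or exported folder, the seeded data folder, and the incremental base backup, while the run's start time is recovered from the folder name by MustGetTimeFromName (defined elsewhere in this package). A hedged sketch of a caller populating them, with placeholder values:

	os.Setenv("SANITY_TEST_SERVICE", "onedrive")
	os.Setenv("SANITY_TEST_FOLDER", "Corso_Test_restored") // hypothetical time-stamped folder name
	os.Setenv("SANITY_TEST_DATA", "seeded-data-folder")
	os.Setenv("SANITY_BASE_BACKUP", "base-backup-folder")

	envs := common.EnvVars(ctx)
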
38
src/cmd/sanity_test/common/filepath.go
Normal file
@ -0,0 +1,38 @@
package common

import (
	"os"
	"path/filepath"
	"time"

	"github.com/alcionai/clues"
)

// FilepathWalker returns a filepath.WalkFunc that records each file's size,
// keyed by its path relative to folderName, and rolls startTime back to the
// earliest modification time seen. startTime is a pointer so that the update
// outlives the walk; a plain time.Time parameter would only mutate a local copy.
func FilepathWalker(
	folderName string,
	exportFileSizes map[string]int64,
	startTime *time.Time,
) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return clues.Stack(err)
		}

		if info.IsDir() {
			return nil
		}

		relPath, err := filepath.Rel(folderName, path)
		if err != nil {
			return clues.Stack(err)
		}

		exportFileSizes[relPath] = info.Size()

		if startTime.After(info.ModTime()) {
			*startTime = info.ModTime()
		}

		return nil
	}
}
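Because FilepathWalker returns a stock filepath.WalkFunc, callers compose it directly with filepath.Walk; the map accumulates per-file sizes and startTime is walked back to the earliest modification time seen. A minimal sketch with a placeholder export root:

	exportFileSizes := map[string]int64{}
	startTime := time.Now()

	err := filepath.Walk(
		"/tmp/export-example", // hypothetical export directory
		common.FilepathWalker("/tmp/export-example", exportFileSizes, &startTime))
	if err != nil {
		fmt.Println("Error walking the path:", err)
	}
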
69
src/cmd/sanity_test/common/sanitree.go
Normal file
@ -0,0 +1,69 @@
package common

import (
	"context"

	"golang.org/x/exp/maps"
)

// Sanitree is used to build out a hierarchical tree of items
// for comparison against each other. Primarily so that a restore
// can compare two subtrees easily.
type Sanitree[T any] struct {
	Container     T
	ContainerID   string
	ContainerName string
	// non-containers only
	ContainsItems int
	// name -> node
	Children map[string]*Sanitree[T]
}

func AssertEqualTrees[T any](
	ctx context.Context,
	expect, other *Sanitree[T],
) {
	if expect == nil && other == nil {
		return
	}

	Assert(
		ctx,
		func() bool { return expect != nil && other != nil },
		"non nil nodes",
		expect,
		other)

	Assert(
		ctx,
		func() bool { return expect.ContainerName == other.ContainerName },
		"container names match",
		expect.ContainerName,
		other.ContainerName)

	Assert(
		ctx,
		func() bool { return expect.ContainsItems == other.ContainsItems },
		"count of items in container matches",
		expect.ContainsItems,
		other.ContainsItems)

	Assert(
		ctx,
		func() bool { return len(expect.Children) == len(other.Children) },
		"count of child containers matches",
		len(expect.Children),
		len(other.Children))

	for name, s := range expect.Children {
		ch, ok := other.Children[name]
		Assert(
			ctx,
			func() bool { return ok },
			"found matching child container",
			name,
			maps.Keys(other.Children))

		AssertEqualTrees(ctx, s, ch)
	}
}
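AssertEqualTrees recurses name-by-name: two trees match when every level agrees on container name, item count, and child set, so a restore can be validated by building one Sanitree from the source data and one from the restored folder. A tiny illustrative sketch with string containers and made-up names:

	expect := &common.Sanitree[string]{
		ContainerName: "root",
		ContainsItems: 2,
		Children: map[string]*common.Sanitree[string]{
			"inbox": {ContainerName: "inbox", ContainsItems: 5},
		},
	}
	other := buildRestoredTree() // hypothetical; built from the restored side

	common.AssertEqualTrees(ctx, expect, other) // reports a TEST FAILURE on mismatch
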
@ -22,7 +22,7 @@ func Assert(
		return
	}

	header = "Error: " + header
	header = "TEST FAILURE: " + header
	expected := fmt.Sprintf("* Expected: %+v", expect)
	got := fmt.Sprintf("* Current: %+v", current)

@ -37,7 +37,7 @@ func Assert(

func Fatal(ctx context.Context, msg string, err error) {
	logger.CtxErr(ctx, err).Error("test failure: " + msg)
	fmt.Println(msg+": ", err)
	fmt.Println("TEST FAILURE: "+msg+": ", err)
	os.Exit(1)
}

16
src/cmd/sanity_test/export/groups.go
Normal file
@ -0,0 +1,16 @@
package export

import (
	"context"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func CheckGroupsExport(
	ctx context.Context,
	ac api.Client,
	envs common.Envs,
) {
	// TODO
}
@ -3,28 +3,21 @@ package export

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/alcionai/clues"
	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
	"github.com/alcionai/corso/src/cmd/sanity_test/restore"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func CheckOneDriveExport(
	ctx context.Context,
	client *msgraphsdk.GraphServiceClient,
	userID, folderName, dataFolder string,
	ac api.Client,
	envs common.Envs,
) {
	drive, err := client.
		Users().
		ByUserId(userID).
		Drive().
		Get(ctx, nil)
	drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID)
	if err != nil {
		common.Fatal(ctx, "getting the drive:", err)
	}
@ -36,37 +29,19 @@ func CheckOneDriveExport(
		startTime = time.Now()
	)

	err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return clues.Stack(err)
		}

		if info.IsDir() {
			return nil
		}

		relPath, err := filepath.Rel(folderName, path)
		if err != nil {
			return clues.Stack(err)
		}

		exportFileSizes[relPath] = info.Size()
		if startTime.After(info.ModTime()) {
			startTime = info.ModTime()
		}

		return nil
	})
	err = filepath.Walk(
		envs.FolderName,
		common.FilepathWalker(envs.FolderName, exportFileSizes, &startTime))
	if err != nil {
		fmt.Println("Error walking the path:", err)
	}

	_ = restore.PopulateDriveDetails(
		ctx,
		client,
		ac,
		ptr.Val(drive.GetId()),
		folderName,
		dataFolder,
		envs.FolderName,
		envs.DataFolder,
		fileSizes,
		map[string][]common.PermissionInfo{},
		startTime)

@ -3,28 +3,21 @@ package export

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/alcionai/clues"
	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
	"github.com/alcionai/corso/src/cmd/sanity_test/restore"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func CheckSharePointExport(
	ctx context.Context,
	client *msgraphsdk.GraphServiceClient,
	siteID, folderName, dataFolder string,
	ac api.Client,
	envs common.Envs,
) {
	drive, err := client.
		Sites().
		BySiteId(siteID).
		Drive().
		Get(ctx, nil)
	drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID)
	if err != nil {
		common.Fatal(ctx, "getting the drive:", err)
	}
@ -36,37 +29,19 @@ func CheckSharePointExport(
		startTime = time.Now()
	)

	err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return clues.Stack(err)
		}

		if info.IsDir() {
			return nil
		}

		relPath, err := filepath.Rel(folderName, path)
		if err != nil {
			return clues.Stack(err)
		}

		exportFileSizes[relPath] = info.Size()
		if startTime.After(info.ModTime()) {
			startTime = info.ModTime()
		}

		return nil
	})
	err = filepath.Walk(
		envs.FolderName,
		common.FilepathWalker(envs.FolderName, exportFileSizes, &startTime))
	if err != nil {
		fmt.Println("Error walking the path:", err)
	}

	_ = restore.PopulateDriveDetails(
		ctx,
		client,
		ac,
		ptr.Val(drive.GetId()),
		folderName,
		dataFolder,
		envs.FolderName,
		envs.DataFolder,
		fileSizes,
		map[string][]common.PermissionInfo{},
		startTime)

@ -3,99 +3,43 @@ package restore

import (
	"context"
	"fmt"
	stdpath "path"
	"strings"
	"time"

	"github.com/alcionai/clues"
	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/microsoftgraph/msgraph-sdk-go/users"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/filters"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// CheckEmailRestoration verifies that the emails count in restored folder is equivalent to
// emails in actual m365 account
func CheckEmailRestoration(
	ctx context.Context,
	client *msgraphsdk.GraphServiceClient,
	testUser, folderName, dataFolder, baseBackupFolder string,
	startTime time.Time,
	ac api.Client,
	envs common.Envs,
) {
	var (
		restoreFolder                models.MailFolderable
		itemCount                    = make(map[string]int32)
		restoreItemCount             = make(map[string]int32)
		builder                      = client.Users().ByUserId(testUser).MailFolders()
		folderNameToItemCount        = make(map[string]int32)
		folderNameToRestoreItemCount = make(map[string]int32)
	)

	for {
		result, err := builder.Get(ctx, nil)
		if err != nil {
			common.Fatal(ctx, "getting mail folders", err)
		}
	restoredTree := buildSanitree(ctx, ac, envs.UserID, envs.FolderName)
	dataTree := buildSanitree(ctx, ac, envs.UserID, envs.DataFolder)

		values := result.GetValue()

		for _, v := range values {
			itemName := ptr.Val(v.GetDisplayName())

			if itemName == folderName {
				restoreFolder = v
				continue
			}

			if itemName == dataFolder || itemName == baseBackupFolder {
				// otherwise, recursively aggregate all child folders.
				getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount)

				itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
			}
		}

		link, ok := ptr.ValOK(result.GetOdataNextLink())
		if !ok {
			break
		}

		builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
	}

	folderID := ptr.Val(restoreFolder.GetId())
	folderName = ptr.Val(restoreFolder.GetDisplayName())
	ctx = clues.Add(
		ctx,
		"restore_folder_id", folderID,
		"restore_folder_name", folderName)
		"restore_folder_id", restoredTree.ContainerID,
		"restore_folder_name", restoredTree.ContainerName,
		"original_folder_id", dataTree.ContainerID,
		"original_folder_name", dataTree.ContainerName)

	childFolder, err := client.
		Users().
		ByUserId(testUser).
		MailFolders().
		ByMailFolderId(folderID).
		ChildFolders().
		Get(ctx, nil)
	if err != nil {
		common.Fatal(ctx, "getting restore folder child folders", err)
	}
	verifyEmailData(ctx, folderNameToRestoreItemCount, folderNameToItemCount)

	for _, fld := range childFolder.GetValue() {
		restoreDisplayName := ptr.Val(fld.GetDisplayName())

		// check if folder is the data folder we loaded or the base backup to verify
		// the incremental backup worked fine
		if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
			count, _ := ptr.ValOK(fld.GetTotalItemCount())

			restoreItemCount[restoreDisplayName] = count
			checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
|
||||
}
|
||||
}
|
||||
|
||||
verifyEmailData(ctx, restoreItemCount, itemCount)
|
||||
common.AssertEqualTrees[models.MailFolderable](
|
||||
ctx,
|
||||
dataTree,
|
||||
restoredTree.Children[envs.DataFolder])
|
||||
}
|
||||
|
||||
func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
|
||||
@ -111,109 +55,71 @@ func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[
|
||||
}
|
||||
}
|
||||
|
||||
// getAllSubFolder will recursively check for all subfolders and get the corresponding
|
||||
// email count.
|
||||
func getAllMailSubFolders(
|
||||
func buildSanitree(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
testUser string,
|
||||
r models.MailFolderable,
|
||||
parentFolder,
|
||||
dataFolder string,
|
||||
messageCount map[string]int32,
|
||||
) {
|
||||
var (
|
||||
folderID = ptr.Val(r.GetId())
|
||||
count int32 = 99
|
||||
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
|
||||
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
|
||||
Top: &count,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
ctx = clues.Add(ctx, "parent_folder_id", folderID)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, options)
|
||||
ac api.Client,
|
||||
userID, folderName string,
|
||||
) *common.Sanitree[models.MailFolderable] {
|
||||
gcc, err := ac.Mail().GetContainerByName(
|
||||
ctx,
|
||||
userID,
|
||||
api.MsgFolderRoot,
|
||||
folderName)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting mail subfolders", err)
|
||||
common.Fatal(
|
||||
ctx,
|
||||
fmt.Sprintf("finding folder by name %q", folderName),
|
||||
err)
|
||||
}
|
||||
|
||||
for _, child := range childFolder.GetValue() {
|
||||
var (
|
||||
childDisplayName = ptr.Val(child.GetDisplayName())
|
||||
childFolderCount = ptr.Val(child.GetChildFolderCount())
|
||||
//nolint:forbidigo
|
||||
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
|
||||
)
|
||||
mmf, ok := gcc.(models.MailFolderable)
|
||||
if !ok {
|
||||
common.Fatal(
|
||||
ctx,
|
||||
"mail folderable required",
|
||||
clues.New("casting "+*gcc.GetDisplayName()+" to models.MailFolderable"))
|
||||
}
|
||||
|
||||
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
|
||||
messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
|
||||
// recursively check for subfolders
|
||||
if childFolderCount > 0 {
|
||||
parentFolder := fullFolderName
|
||||
root := &common.Sanitree[models.MailFolderable]{
|
||||
Container: mmf,
|
||||
ContainerID: ptr.Val(mmf.GetId()),
|
||||
ContainerName: ptr.Val(mmf.GetDisplayName()),
|
||||
ContainsItems: int(ptr.Val(mmf.GetTotalItemCount())),
|
||||
Children: map[string]*common.Sanitree[models.MailFolderable]{},
|
||||
}
|
||||
|
||||
getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkAllSubFolder will recursively traverse inside the restore folder and
|
||||
// verify that data matched in all subfolders
|
||||
func checkAllSubFolder(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
r models.MailFolderable,
|
||||
testUser,
|
||||
parentFolder,
|
||||
dataFolder string,
|
||||
restoreMessageCount map[string]int32,
|
||||
) {
|
||||
var (
|
||||
folderID = ptr.Val(r.GetId())
|
||||
count int32 = 99
|
||||
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
|
||||
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
|
||||
Top: &count,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, options)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting mail subfolders", err)
|
||||
}
|
||||
|
||||
for _, child := range childFolder.GetValue() {
|
||||
var (
|
||||
childDisplayName = ptr.Val(child.GetDisplayName())
|
||||
//nolint:forbidigo
|
||||
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
|
||||
)
|
||||
|
||||
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
|
||||
childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
|
||||
restoreMessageCount[fullFolderName] = childTotalCount
|
||||
}
|
||||
|
||||
childFolderCount := ptr.Val(child.GetChildFolderCount())
|
||||
|
||||
if childFolderCount > 0 {
|
||||
parentFolder := fullFolderName
|
||||
checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
|
||||
recurseSubfolders(ctx, ac, root, userID)
|
||||
|
||||
return root
|
||||
}
|
||||
|
||||
func recurseSubfolders(
|
||||
ctx context.Context,
|
||||
ac api.Client,
|
||||
parent *common.Sanitree[models.MailFolderable],
|
||||
userID string,
|
||||
) {
|
||||
childFolders, err := ac.Mail().GetContainerChildren(
|
||||
ctx,
|
||||
userID,
|
||||
parent.ContainerID)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting subfolders", err)
|
||||
}
|
||||
|
||||
for _, child := range childFolders {
|
||||
c := &common.Sanitree[models.MailFolderable]{
|
||||
Container: child,
|
||||
ContainerID: ptr.Val(child.GetId()),
|
||||
ContainerName: ptr.Val(child.GetDisplayName()),
|
||||
ContainsItems: int(ptr.Val(child.GetTotalItemCount())),
|
||||
Children: map[string]*common.Sanitree[models.MailFolderable]{},
|
||||
}
|
||||
|
||||
parent.Children[c.ContainerName] = c
|
||||
|
||||
if ptr.Val(child.GetChildFolderCount()) > 0 {
|
||||
recurseSubfolders(ctx, ac, c, userID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
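The exchange check now builds two Sanitree hierarchies and hands them to common.AssertEqualTrees, whose implementation is not part of this diff. Assuming it recursively compares the fields buildSanitree populates (name, item count, children), a minimal sketch might look like the following; treat it as a guess at the shape, not the actual helper:

// Hypothetical sketch of a recursive tree comparison; the real
// common.AssertEqualTrees may check more (or different) fields.
func assertEqualTrees[T any](
	ctx context.Context,
	expect, result *common.Sanitree[T],
) {
	if expect == nil && result == nil {
		return
	}

	if expect == nil || result == nil {
		common.Fatal(ctx, "tree mismatch", clues.New("one tree is nil"))
	}

	if expect.ContainerName != result.ContainerName ||
		expect.ContainsItems != result.ContainsItems ||
		len(expect.Children) != len(result.Children) {
		common.Fatal(ctx, "tree mismatch", clues.New(expect.ContainerName))
	}

	// Compare children by name; a missing child surfaces as a nil result.
	for name, child := range expect.Children {
		assertEqualTrees(ctx, child, result.Children[name])
	}
}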
16
src/cmd/sanity_test/restore/groups.go
Normal file
@ -0,0 +1,16 @@
package restore

import (
"context"

"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func CheckGroupsRestoration(
ctx context.Context,
ac api.Client,
envs common.Envs,
) {
// TODO
}
@ -7,12 +7,12 @@ import (
"time"

"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"golang.org/x/exp/slices"

"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

const (
@ -21,34 +21,29 @@ const (

func CheckOneDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
userID, folderName, dataFolder string,
startTime time.Time,
ac api.Client,
envs common.Envs,
) {
drive, err := client.
Users().
ByUserId(userID).
Drive().
Get(ctx, nil)
drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}

checkDriveRestoration(
ctx,
client,
ac,
path.OneDriveService,
folderName,
envs.FolderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
envs.DataFolder,
envs.StartTime,
false)
}

func checkDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
service path.ServiceType,
folderName,
driveID,
@ -70,7 +65,7 @@ func checkDriveRestoration(

restoreFolderID := PopulateDriveDetails(
ctx,
client,
ac,
driveID,
folderName,
dataFolder,
@ -78,7 +73,14 @@ func checkDriveRestoration(
folderPermissions,
startTime)

getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime)
getRestoredDrive(
ctx,
ac,
driveID,
restoreFolderID,
restoreFile,
restoredFolderPermissions,
startTime)

checkRestoredDriveItemPermissions(
ctx,
@ -105,7 +107,7 @@ func checkDriveRestoration(

func PopulateDriveDetails(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, folderName, dataFolder string,
fileSizes map[string]int64,
folderPermissions map[string][]common.PermissionInfo,
@ -113,18 +115,12 @@ func PopulateDriveDetails(
) string {
var restoreFolderID string

response, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId("root").
Children().
Get(ctx, nil)
children, err := ac.Drives().GetFolderChildren(ctx, driveID, "root")
if err != nil {
common.Fatal(ctx, "getting drive by id", err)
}

for _, driveItem := range response.GetValue() {
for _, driveItem := range children {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
@ -156,8 +152,17 @@ func PopulateDriveDetails(
continue
}

folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime)
folderPermissions[itemName] = permissionIn(ctx, ac, driveID, itemID)

getOneDriveChildFolder(
ctx,
ac,
driveID,
itemID,
itemName,
fileSizes,
folderPermissions,
startTime)
}

return restoreFolderID
@ -228,18 +233,18 @@ func checkRestoredDriveItemPermissions(

func getOneDriveChildFolder(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, itemID, parentName string,
fileSizes map[string]int64,
folderPermission map[string][]common.PermissionInfo,
startTime time.Time,
) {
response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil)
children, err := ac.Drives().GetFolderChildren(ctx, driveID, itemID)
if err != nil {
common.Fatal(ctx, "getting child folder", err)
}

for _, driveItem := range response.GetValue() {
for _, driveItem := range children {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
@ -268,31 +273,33 @@ func getOneDriveChildFolder(
continue
}

folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
folderPermission[fullName] = permissionIn(ctx, ac, driveID, itemID)
getOneDriveChildFolder(
ctx,
ac,
driveID,
itemID,
fullName,
fileSizes,
folderPermission,
startTime)
}
}

func getRestoredDrive(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, restoreFolderID string,
restoreFile map[string]int64,
restoreFolder map[string][]common.PermissionInfo,
startTime time.Time,
) {
restored, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(restoreFolderID).
Children().
Get(ctx, nil)
children, err := ac.Drives().GetFolderChildren(ctx, driveID, restoreFolderID)
if err != nil {
common.Fatal(ctx, "getting child folder", err)
}

for _, item := range restored.GetValue() {
for _, item := range children {
var (
itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName())
@ -308,8 +315,16 @@ func getRestoredDrive(
continue
}

restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime)
restoreFolder[itemName] = permissionIn(ctx, ac, driveID, itemID)
getOneDriveChildFolder(
ctx,
ac,
driveID,
itemID,
itemName,
restoreFile,
restoreFolder,
startTime)
}
}

@ -319,18 +334,12 @@ func getRestoredDrive(

func permissionIn(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, itemID string,
) []common.PermissionInfo {
pi := []common.PermissionInfo{}

pcr, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Permissions().
Get(ctx, nil)
pcr, err := ac.Drives().GetItemPermission(ctx, driveID, itemID)
if err != nil {
common.Fatal(ctx, "getting permission", err)
}

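Throughout this file, raw msgraph SDK builder chains are swapped for api.Client wrappers such as ac.Drives().GetFolderChildren. The wrapper's body isn't shown in the diff; judging from the removed lines, a minimal version could wrap the same SDK call, though the real method presumably adds paging and retry handling:

// Sketch only, built from the removed SDK calls above; not the actual
// api.Client implementation.
func getFolderChildren(
	ctx context.Context,
	client *msgraphsdk.GraphServiceClient,
	driveID, itemID string,
) ([]models.DriveItemable, error) {
	resp, err := client.
		Drives().
		ByDriveId(driveID).
		Items().
		ByDriveItemId(itemID).
		Children().
		Get(ctx, nil)
	if err != nil {
		return nil, clues.Stack(err)
	}

	// Single page only; the real wrapper almost certainly follows
	// @odata.nextLink to fetch the remaining pages.
	return resp.GetValue(), nil
}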
@ -2,38 +2,31 @@ package restore

import (
"context"
"time"

msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"

"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func CheckSharePointRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
siteID, userID, folderName, dataFolder string,
startTime time.Time,
ac api.Client,
envs common.Envs,
) {
drive, err := client.
Sites().
BySiteId(siteID).
Drive().
Get(ctx, nil)
drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}

checkDriveRestoration(
ctx,
client,
ac,
path.SharePointService,
folderName,
envs.FolderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
envs.DataFolder,
envs.StartTime,
true)
}

@ -2,21 +2,40 @@ package main

import (
"context"
"fmt"
"os"
"strings"
"time"

"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/spf13/cobra"

"github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/export"
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/logger"
)

// ---------------------------------------------------------------------------
// root command
// ---------------------------------------------------------------------------

func rootCMD() *cobra.Command {
return &cobra.Command{
Use: "sanity-test",
Short: "run the sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRoot,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
fmt.Println("running", cmd.UseLine())
},
}
}

func sanityTestRoot(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a kind of test"))
}

func main() {
ls := logger.Settings{
File: logger.GetLogFile(""),
@ -29,60 +48,226 @@ func main() {
_ = log.Sync() // flush all logs in the buffer
}()

// TODO: only needed for exchange
graph.InitializeConcurrencyLimiter(ctx, true, 4)

adapter, err := graph.CreateAdapter(
tconfig.GetM365TenantID(ctx),
os.Getenv("AZURE_CLIENT_ID"),
os.Getenv("AZURE_CLIENT_SECRET"))
if err != nil {
common.Fatal(ctx, "creating adapter", err)
}
root := rootCMD()

var (
client = msgraphsdk.NewGraphServiceClient(adapter)
testUser = tconfig.GetM365UserID(ctx)
testSite = tconfig.GetM365SiteID(ctx)
testKind = os.Getenv("SANITY_TEST_KIND") // restore or export (cli arg?)
testService = os.Getenv("SANITY_TEST_SERVICE")
folder = strings.TrimSpace(os.Getenv("SANITY_TEST_FOLDER"))
dataFolder = os.Getenv("TEST_DATA")
baseBackupFolder = os.Getenv("BASE_BACKUP")
)
restCMD := restoreCMD()

ctx = clues.Add(
ctx,
"resource_owner", testUser,
"service", testService,
"sanity_restore_folder", folder)
restCMD.AddCommand(restoreExchangeCMD())
restCMD.AddCommand(restoreOneDriveCMD())
restCMD.AddCommand(restoreSharePointCMD())
restCMD.AddCommand(restoreGroupsCMD())
root.AddCommand(restCMD)

logger.Ctx(ctx).Info("starting sanity test check")
expCMD := exportCMD()

switch testKind {
case "restore":
startTime, _ := common.MustGetTimeFromName(ctx, folder)
clues.Add(ctx, "sanity_restore_start_time", startTime.Format(time.RFC3339))
expCMD.AddCommand(exportOneDriveCMD())
expCMD.AddCommand(exportSharePointCMD())
expCMD.AddCommand(exportGroupsCMD())
root.AddCommand(expCMD)

switch testService {
case "exchange":
restore.CheckEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
case "onedrive":
restore.CheckOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
case "sharepoint":
restore.CheckSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
default:
common.Fatal(ctx, "unknown service for restore sanity tests", nil)
}
case "export":
switch testService {
case "onedrive":
export.CheckOneDriveExport(ctx, client, testUser, folder, dataFolder)
case "sharepoint":
export.CheckSharePointExport(ctx, client, testSite, folder, dataFolder)
default:
common.Fatal(ctx, "unknown service for export sanity tests", nil)
}
default:
common.Fatal(ctx, "unknown test kind (expected restore or export)", nil)
if err := root.Execute(); err != nil {
os.Exit(1)
}
}

// ---------------------------------------------------------------------------
// restore/export command
// ---------------------------------------------------------------------------

func exportCMD() *cobra.Command {
return &cobra.Command{
Use: "export",
Short: "run the post-export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExport,
}
}

func sanityTestExport(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a service"))
}

func restoreCMD() *cobra.Command {
return &cobra.Command{
Use: "restore",
Short: "run the post-restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestore,
}
}

func sanityTestRestore(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a service"))
}

// ---------------------------------------------------------------------------
// service commands - export
// ---------------------------------------------------------------------------

func exportGroupsCMD() *cobra.Command {
return &cobra.Command{
Use: "groups",
Short: "run the groups export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportGroups,
}
}

func sanityTestExportGroups(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}

export.CheckGroupsExport(ctx, ac, envs)

return nil
}

func exportOneDriveCMD() *cobra.Command {
return &cobra.Command{
Use: "onedrive",
Short: "run the onedrive export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportOneDrive,
}
}

func sanityTestExportOneDrive(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}

export.CheckOneDriveExport(ctx, ac, envs)

return nil
}

func exportSharePointCMD() *cobra.Command {
return &cobra.Command{
Use: "sharepoint",
Short: "run the sharepoint export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportSharePoint,
}
}

func sanityTestExportSharePoint(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}

export.CheckSharePointExport(ctx, ac, envs)

return nil
}

// ---------------------------------------------------------------------------
// service commands - restore
// ---------------------------------------------------------------------------

func restoreExchangeCMD() *cobra.Command {
return &cobra.Command{
Use: "exchange",
Short: "run the exchange restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreExchange,
}
}

func sanityTestRestoreExchange(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}

restore.CheckEmailRestoration(ctx, ac, envs)

return nil
}

func restoreOneDriveCMD() *cobra.Command {
return &cobra.Command{
Use: "onedrive",
Short: "run the onedrive restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreOneDrive,
}
}

func sanityTestRestoreOneDrive(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}

restore.CheckOneDriveRestoration(ctx, ac, envs)

return nil
}

func restoreSharePointCMD() *cobra.Command {
return &cobra.Command{
Use: "sharepoint",
Short: "run the sharepoint restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreSharePoint,
}
}

func sanityTestRestoreSharePoint(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}

restore.CheckSharePointRestoration(ctx, ac, envs)

return nil
}

func restoreGroupsCMD() *cobra.Command {
return &cobra.Command{
Use: "groups",
Short: "run the groups restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreGroups,
}
}

func sanityTestRestoreGroups(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}

restore.CheckGroupsRestoration(ctx, ac, envs)

return nil
}

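The env-var dispatch is replaced by a cobra command tree: a root sanity-test command whose RunE always errors, restore/export subcommands that likewise error, and per-service leaf commands that do the work, so an invocation must name both the kind of test and the service. A compact, runnable distillation of that wiring with placeholder logic:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Root refuses to run on its own, mirroring sanityTestRoot above.
	root := &cobra.Command{
		Use: "sanity-test",
		RunE: func(cmd *cobra.Command, args []string) error {
			return errors.New("must specify a kind of test")
		},
	}

	// Kind-level command refuses to run without a service, like sanityTestRestore.
	restore := &cobra.Command{
		Use: "restore",
		RunE: func(cmd *cobra.Command, args []string) error {
			return errors.New("must specify a service")
		},
	}

	// Leaf command is where the actual checks would live.
	restore.AddCommand(&cobra.Command{
		Use: "onedrive",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("would run the onedrive restore checks here")
			return nil
		},
	})

	root.AddCommand(restore)

	// e.g. `sanity-test restore onedrive`
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}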
@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap {

func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) {
if pm.Empty() {
require.True(t, r.Empty(), "both prefix maps are empty")
require.True(t, r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys())
return
}

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"io"
"sync"
"time"

"github.com/alcionai/clues"
@ -15,16 +16,23 @@ import (
)

var (
_ Item = &unindexedPrefetchedItem{}
_ ItemModTime = &unindexedPrefetchedItem{}

_ Item = &prefetchedItem{}
_ ItemInfo = &prefetchedItem{}
_ ItemModTime = &prefetchedItem{}

_ Item = &unindexedLazyItem{}
_ ItemModTime = &unindexedLazyItem{}

_ Item = &lazyItem{}
_ ItemInfo = &lazyItem{}
_ ItemModTime = &lazyItem{}
)

func NewDeletedItem(itemID string) Item {
return &prefetchedItem{
return &unindexedPrefetchedItem{
id: itemID,
deleted: true,
// TODO(ashmrtn): This really doesn't need to be set since deleted items are
@ -34,24 +42,26 @@ func NewDeletedItem(itemID string) Item {
}
}

func NewPrefetchedItem(
func NewUnindexedPrefetchedItem(
reader io.ReadCloser,
itemID string,
info details.ItemInfo,
modTime time.Time,
) Item {
return &prefetchedItem{
return &unindexedPrefetchedItem{
id: itemID,
reader: reader,
info: info,
modTime: info.Modified(),
modTime: modTime,
}
}

// prefetchedItem represents a single item retrieved from the remote service.
type prefetchedItem struct {
// unindexedPrefetchedItem represents a single item retrieved from the remote
// service.
//
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedPrefetchedItem struct {
id string
reader io.ReadCloser
info details.ItemInfo
// modTime is the modified time of the item. It should match the modTime in
// info if info is present. Here as a separate field so that deleted items
// don't error out by trying to source it from info.
@ -62,26 +72,50 @@ type prefetchedItem struct {
deleted bool
}

func (i prefetchedItem) ID() string {
func (i unindexedPrefetchedItem) ID() string {
return i.id
}

func (i *prefetchedItem) ToReader() io.ReadCloser {
func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser {
return i.reader
}

func (i prefetchedItem) Deleted() bool {
func (i unindexedPrefetchedItem) Deleted() bool {
return i.deleted
}

func (i unindexedPrefetchedItem) ModTime() time.Time {
return i.modTime
}

func NewPrefetchedItem(
reader io.ReadCloser,
itemID string,
info details.ItemInfo,
) Item {
return &prefetchedItem{
unindexedPrefetchedItem: unindexedPrefetchedItem{
id: itemID,
reader: reader,
modTime: info.Modified(),
},
info: info,
}
}

// prefetchedItem represents a single item retrieved from the remote service.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type prefetchedItem struct {
unindexedPrefetchedItem
info details.ItemInfo
}

func (i prefetchedItem) Info() (details.ItemInfo, error) {
return i.info, nil
}

func (i prefetchedItem) ModTime() time.Time {
return i.modTime
}

type ItemDataGetter interface {
GetData(
context.Context,
@ -89,14 +123,14 @@ type ItemDataGetter interface {
) (io.ReadCloser, *details.ItemInfo, bool, error)
}

func NewLazyItem(
func NewUnindexedLazyItem(
ctx context.Context,
itemGetter ItemDataGetter,
itemID string,
modTime time.Time,
errs *fault.Bus,
) Item {
return &lazyItem{
return &unindexedLazyItem{
ctx: ctx,
id: itemID,
itemGetter: itemGetter,
@ -105,11 +139,15 @@ func NewLazyItem(
}
}

// lazyItem represents a single item retrieved from the remote service. It
// lazily fetches the item's data when the first call to ToReader().Read() is
// unindexedLazyItem represents a single item retrieved from the remote service.
// It lazily fetches the item's data when the first call to ToReader().Read() is
// made.
type lazyItem struct {
//
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedLazyItem struct {
ctx context.Context
mu sync.Mutex
id string
errs *fault.Bus
itemGetter ItemDataGetter
@ -127,12 +165,18 @@ type lazyItem struct {
delInFlight bool
}

func (i lazyItem) ID() string {
func (i *unindexedLazyItem) ID() string {
return i.id
}

func (i *lazyItem) ToReader() io.ReadCloser {
func (i *unindexedLazyItem) ToReader() io.ReadCloser {
return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
// Don't allow getting Item info while trying to initialize said info.
// GetData could be a long running call, but in theory nothing should happen
// with the item until a reader is returned anyway.
i.mu.Lock()
defer i.mu.Unlock()

reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.errs)
if err != nil {
return nil, clues.Stack(err)
@ -159,11 +203,46 @@ func (i *lazyItem) ToReader() io.ReadCloser {
})
}

func (i lazyItem) Deleted() bool {
func (i *unindexedLazyItem) Deleted() bool {
return false
}

func (i lazyItem) Info() (details.ItemInfo, error) {
func (i *unindexedLazyItem) ModTime() time.Time {
return i.modTime
}

func NewLazyItem(
ctx context.Context,
itemGetter ItemDataGetter,
itemID string,
modTime time.Time,
errs *fault.Bus,
) Item {
return &lazyItem{
unindexedLazyItem: unindexedLazyItem{
ctx: ctx,
id: itemID,
itemGetter: itemGetter,
modTime: modTime,
errs: errs,
},
}
}

// lazyItem represents a single item retrieved from the remote service. It
// lazily fetches the item's data when the first call to ToReader().Read() is
// made.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type lazyItem struct {
unindexedLazyItem
}

func (i *lazyItem) Info() (details.ItemInfo, error) {
i.mu.Lock()
defer i.mu.Unlock()

if i.delInFlight {
return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx)
} else if i.info == nil {
@ -173,7 +252,3 @@ func (i lazyItem) Info() (details.ItemInfo, error) {

return *i.info, nil
}

func (i lazyItem) ModTime() time.Time {
return i.modTime
}

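The unindexed/indexed split rests on struct embedding: the base type satisfies only Item and ItemModTime, while the wrapper embeds it and adds Info(), so a runtime type assertion against ItemInfo tells the two apart (exactly what the tests below assert). A self-contained illustration of the pattern with throwaway types, unrelated to the Corso interfaces:

package main

import "fmt"

type Item interface{ ID() string }

type ItemInfo interface{ Info() string }

// base satisfies Item only.
type base struct{ id string }

func (b base) ID() string { return b.id }

// indexed embeds base and additionally satisfies ItemInfo.
type indexed struct {
	base
	info string
}

func (i indexed) Info() string { return i.info }

func main() {
	var plain Item = base{id: "a"}
	var rich Item = indexed{base: base{id: "b"}, info: "details"}

	_, ok := plain.(ItemInfo)
	fmt.Println(ok) // false: the base type never reaches backup details

	_, ok = rich.(ItemInfo)
	fmt.Println(ok) // true: indexed items carry Info()
}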
@ -49,6 +49,31 @@ func TestItemUnitSuite(t *testing.T) {
suite.Run(t, &ItemUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
prefetch := data.NewUnindexedPrefetchedItem(
io.NopCloser(bytes.NewReader([]byte{})),
"foo",
time.Time{})
_, ok := prefetch.(data.ItemInfo)
assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()")
}

func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
t := suite.T()

ctx, flush := tester.NewContext(t)
defer flush()

lazy := data.NewUnindexedLazyItem(
ctx,
nil,
"foo",
time.Time{},
fault.New(true))
_, ok := lazy.(data.ItemInfo)
assert.False(t, ok, "unindexedLazyItem implements Info()")
}

func (suite *ItemUnitSuite) TestDeletedItem() {
var (
t = suite.T()

@ -205,7 +205,7 @@ func (w *conn) commonConnect(
bst,
password,
kopiaOpts); err != nil {
return clues.Wrap(err, "connecting to repo").WithClues(ctx)
return clues.Wrap(err, "connecting to kopia repo").WithClues(ctx)
}

if err := w.open(ctx, cfgFile, password); err != nil {

@ -16,12 +16,11 @@ func filesystemStorage(
repoOpts repository.Options,
s storage.Storage,
) (blob.Storage, error) {
cfg, err := s.StorageConfig()
fsCfg, err := s.ToFilesystemConfig()
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
}

fsCfg := cfg.(*storage.FilesystemConfig)
opts := filesystem.Options{
Path: fsCfg.Path,
}

@ -20,13 +20,11 @@ func s3BlobStorage(
repoOpts repository.Options,
s storage.Storage,
) (blob.Storage, error) {
sc, err := s.StorageConfig()
cfg, err := s.ToS3Config()
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
}

cfg := sc.(*storage.S3Config)

endpoint := defaultS3Endpoint
if len(cfg.Endpoint) > 0 {
endpoint = cfg.Endpoint

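Both storage providers move from the generic StorageConfig() plus an unchecked type assertion to typed accessors, which turns a potential panic on a bad cast into an ordinary error. A plausible shape for one accessor, assuming it layers over the old StorageConfig() shown in the removed lines (the real method in pkg/storage may differ):

// Sketch of a typed accessor; the concrete implementation is not in this diff.
func (s Storage) ToS3Config() (*S3Config, error) {
	cfg, err := s.StorageConfig()
	if err != nil {
		return nil, clues.Stack(err)
	}

	// Checked assertion: a mismatched provider yields an error, not a panic.
	s3Cfg, ok := cfg.(*S3Config)
	if !ok {
		return nil, clues.New("storage config is not an S3 config")
	}

	return s3Cfg, nil
}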
@ -33,11 +33,7 @@ const (
MaxOneNoteFileSize = 2 * 1024 * 1024 * 1024
)

var (
_ data.BackupCollection = &Collection{}
_ data.Item = &metadata.Item{}
_ data.ItemModTime = &metadata.Item{}
)
var _ data.BackupCollection = &Collection{}

// Collection represents a set of OneDrive objects retrieved from M365
type Collection struct {
@ -588,13 +584,15 @@ func (oc *Collection) streamDriveItem(
return progReader, nil
})

oc.data <- &metadata.Item{
ItemID: metaFileName + metaSuffix,
Data: metaReader,
// We wrap the reader with a lazy reader so that the progress bar is only
// initialized if the file is read. Since we're not actually lazily reading
// data, just use the eager item implementation.
oc.data <- data.NewUnindexedPrefetchedItem(
metaReader,
metaFileName+metaSuffix,
// The metadata file should always use the latest time, as a
// permissions change does not update the mod time.
Mod: time.Now(),
}
time.Now())

// Item read successfully, add to collection
if isFile {

@ -230,16 +230,16 @@ func (c *Collections) Get(
ssmb *prefixmatcher.StringSetMatchBuilder,
errs *fault.Bus,
) ([]data.BackupCollection, bool, error) {
prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata)
if err != nil {
return nil, false, err
}

ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup)
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePrevBackup)

driveTombstones := map[string]struct{}{}

for driveID := range oldPathsByDriveID {
for driveID := range oldPrevPathsByDriveID {
driveTombstones[driveID] = struct{}{}
}

@ -257,76 +257,88 @@ func (c *Collections) Get(
}

var (
// Drive ID -> delta URL for drive
deltaURLs = map[string]string{}
// Drive ID -> folder ID -> folder path
folderPaths = map[string]map[string]string{}
numPrevItems = 0
driveIDToDeltaLink = map[string]string{}
driveIDToPrevPaths = map[string]map[string]string{}
numPrevItems = 0
)

for _, d := range drives {
var (
driveID = ptr.Val(d.GetId())
driveName = ptr.Val(d.GetName())
prevDelta = prevDeltas[driveID]
oldPaths = oldPathsByDriveID[driveID]
numOldDelta = 0
ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
driveID = ptr.Val(d.GetId())
driveName = ptr.Val(d.GetName())
ictx = clues.Add(
ctx,
"drive_id", driveID,
"drive_name", clues.Hide(driveName))

excludedItemIDs = map[string]struct{}{}
oldPrevPaths = oldPrevPathsByDriveID[driveID]
prevDeltaLink = prevDriveIDToDelta[driveID]

// itemCollection is used to identify which collection a
// file belongs to. This is useful to delete a file from the
// collection it was previously in, in case it was moved to a
// different collection within the same delta query
// item ID -> parent container ID
itemCollection = map[string]string{}
)

delete(driveTombstones, driveID)

if _, ok := driveIDToPrevPaths[driveID]; !ok {
driveIDToPrevPaths[driveID] = map[string]string{}
}

if _, ok := c.CollectionMap[driveID]; !ok {
c.CollectionMap[driveID] = map[string]*Collection{}
}

if len(prevDelta) > 0 {
numOldDelta++
}

logger.Ctx(ictx).Infow(
"previous metadata for drive",
"num_paths_entries", len(oldPaths),
"num_deltas_entries", numOldDelta)
"num_paths_entries", len(oldPrevPaths))

delta, paths, excluded, err := collectItems(
items, du, err := c.handler.EnumerateDriveItemsDelta(
ictx,
c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()),
driveID,
driveName,
c.UpdateCollections,
oldPaths,
prevDelta,
errs)
prevDeltaLink)
if err != nil {
return nil, false, err
}

// Used for logging below.
numDeltas := 0

// It's alright to have an empty folders map (i.e. no folders found) but not
// an empty delta token. This is because when deserializing the metadata we
// remove entries for which there is no corresponding delta token/folder. If
// we leave empty delta tokens then we may end up setting the State field
// for collections when not actually getting delta results.
if len(delta.URL) > 0 {
deltaURLs[driveID] = delta.URL
numDeltas++
if len(du.URL) > 0 {
driveIDToDeltaLink[driveID] = du.URL
}

newPrevPaths, err := c.UpdateCollections(
ctx,
driveID,
driveName,
items,
oldPrevPaths,
itemCollection,
excludedItemIDs,
du.Reset,
errs)
if err != nil {
return nil, false, clues.Stack(err)
}

// Avoid the edge case where there's no paths but we do have a valid delta
// token. We can accomplish this by adding an empty paths map for this
// drive. If we don't have this then the next backup won't use the delta
// token because it thinks the folder paths weren't persisted.
folderPaths[driveID] = map[string]string{}
maps.Copy(folderPaths[driveID], paths)
driveIDToPrevPaths[driveID] = map[string]string{}
maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths)

logger.Ctx(ictx).Infow(
"persisted metadata for drive",
"num_paths_entries", len(paths),
"num_deltas_entries", numDeltas,
"delta_reset", delta.Reset)
"num_new_paths_entries", len(newPrevPaths),
"delta_reset", du.Reset)

numDriveItems := c.NumItems - numPrevItems
numPrevItems = c.NumItems
@ -338,7 +350,7 @@ func (c *Collections) Get(
err = c.addURLCacheToDriveCollections(
ictx,
driveID,
prevDelta,
prevDeltaLink,
errs)
if err != nil {
return nil, false, err
@ -347,8 +359,8 @@ func (c *Collections) Get(

// For both cases we don't need to do set difference on folder map if the
// delta token was valid because we should see all the changes.
if !delta.Reset {
if len(excluded) == 0 {
if !du.Reset {
if len(excludedItemIDs) == 0 {
continue
}

@ -357,7 +369,7 @@ func (c *Collections) Get(
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
}

ssmb.Add(p.String(), excluded)
ssmb.Add(p.String(), excludedItemIDs)

continue
}
@ -372,13 +384,11 @@ func (c *Collections) Get(
foundFolders[id] = struct{}{}
}

for fldID, p := range oldPaths {
for fldID, p := range oldPrevPaths {
if _, ok := foundFolders[fldID]; ok {
continue
}

delete(paths, fldID)

prevPath, err := path.FromDataLayerPath(p, false)
if err != nil {
err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
@ -446,14 +456,14 @@ func (c *Collections) Get(
// empty/missing and default to a full backup.
logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")

return collections, canUsePreviousBackup, nil
return collections, canUsePrevBackup, nil
}

md, err := graph.MakeMetadataCollection(
pathPrefix,
[]graph.MetadataCollectionEntry{
graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths),
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs),
graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths),
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink),
},
c.statusUpdater)

@ -466,7 +476,7 @@ func (c *Collections) Get(
collections = append(collections, md)
}

return collections, canUsePreviousBackup, nil
return collections, canUsePrevBackup, nil
}

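The comments above spell out an invariant worth restating: a drive may persist an empty previous-paths map, but never a delta link without a paths entry, or the next backup will refuse to trust the token. A hedged sketch of that persistence step, using a hypothetical helper name (persistDriveMetadata is not part of this diff):

// Hypothetical helper illustrating the persistence invariant from Get above.
func persistDriveMetadata(
	driveIDToPrevPaths map[string]map[string]string,
	driveIDToDeltaLink map[string]string,
	driveID, deltaLink string,
	newPrevPaths map[string]string,
) {
	// Only record non-empty delta links; empty links must not be persisted.
	if len(deltaLink) > 0 {
		driveIDToDeltaLink[driveID] = deltaLink
	}

	// Always write a paths entry, even an empty one, so the next backup
	// knows the folder paths were persisted and can reuse the delta link.
	driveIDToPrevPaths[driveID] = map[string]string{}
	maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths)
}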
// addURLCacheToDriveCollections adds a URL cache to all collections belonging to
@ -480,7 +490,7 @@ func (c *Collections) addURLCacheToDriveCollections(
driveID,
prevDelta,
urlCacheRefreshInterval,
c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()),
c.handler,
errs)
if err != nil {
return err
@ -536,22 +546,21 @@ func updateCollectionPaths(

func (c *Collections) handleDelete(
itemID, driveID string,
oldPaths, newPaths map[string]string,
oldPrevPaths, currPrevPaths, newPrevPaths map[string]string,
isFolder bool,
excluded map[string]struct{},
itemCollection map[string]map[string]string,
invalidPrevDelta bool,
) error {
if !isFolder {
// Try to remove the item from the Collection if an entry exists for this
// item. This handles cases where an item was created and deleted during the
// same delta query.
if parentID, ok := itemCollection[driveID][itemID]; ok {
if parentID, ok := currPrevPaths[itemID]; ok {
if col := c.CollectionMap[driveID][parentID]; col != nil {
col.Remove(itemID)
}

delete(itemCollection[driveID], itemID)
delete(currPrevPaths, itemID)
}

// Don't need to add to exclude list if the delta is invalid since the
@ -572,7 +581,7 @@ func (c *Collections) handleDelete(

var prevPath path.Path

prevPathStr, ok := oldPaths[itemID]
prevPathStr, ok := oldPrevPaths[itemID]
if ok {
var err error

@ -589,7 +598,7 @@ func (c *Collections) handleDelete(
// Nested folders also return deleted delta results so we don't have to
// worry about doing a prefix search in the map to remove the subtree of
// the deleted folder/package.
delete(newPaths, itemID)
delete(newPrevPaths, itemID)

if prevPath == nil || invalidPrevDelta {
// It is possible that an item was created and deleted between two delta
@ -679,21 +688,29 @@ func (c *Collections) getCollectionPath(

// UpdateCollections initializes and adds the provided drive items to Collections.
// A new collection is created for every drive folder (or package).
// oldPaths is the unchanged data that was loaded from the metadata file.
// newPaths starts as a copy of oldPaths and is updated as changes are found in
// the returned results.
// oldPrevPaths is the unchanged data that was loaded from the metadata file.
// This map is not modified during the call.
// currPrevPaths starts as a copy of oldPrevPaths and is updated as changes are found in
// the returned results. Items are added to this collection throughout the call.
// newPrevPaths, i.e. the items added during this call, gets returned as a map.
func (c *Collections) UpdateCollections(
ctx context.Context,
driveID, driveName string,
items []models.DriveItemable,
oldPaths map[string]string,
newPaths map[string]string,
oldPrevPaths map[string]string,
currPrevPaths map[string]string,
excluded map[string]struct{},
itemCollection map[string]map[string]string,
invalidPrevDelta bool,
errs *fault.Bus,
) error {
el := errs.Local()
) (map[string]string, error) {
var (
el = errs.Local()
newPrevPaths = map[string]string{}
)

if !invalidPrevDelta {
maps.Copy(newPrevPaths, oldPrevPaths)
}

for _, item := range items {
if el.Failure() != nil {
@ -703,8 +720,12 @@ func (c *Collections) UpdateCollections(
var (
itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName())
ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName))
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
ictx = clues.Add(
ctx,
"item_id", itemID,
"item_name", clues.Hide(itemName),
"item_is_folder", isFolder)
)

if item.GetMalware() != nil {
@ -726,13 +747,13 @@ func (c *Collections) UpdateCollections(
if err := c.handleDelete(
itemID,
driveID,
oldPaths,
newPaths,
oldPrevPaths,
currPrevPaths,
newPrevPaths,
isFolder,
excluded,
itemCollection,
invalidPrevDelta); err != nil {
return clues.Stack(err).WithClues(ictx)
return nil, clues.Stack(err).WithClues(ictx)
}

continue
@ -758,13 +779,13 @@ func (c *Collections) UpdateCollections(
// Deletions are handled above so this is just moves/renames.
var prevPath path.Path

prevPathStr, ok := oldPaths[itemID]
prevPathStr, ok := oldPrevPaths[itemID]
if ok {
prevPath, err = path.FromDataLayerPath(prevPathStr, false)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path").
WithClues(ictx).
With("path_string", prevPathStr))
With("prev_path_string", path.LoggableDir(prevPathStr)))
}
} else if item.GetRoot() != nil {
// Root doesn't move or get renamed.
@ -774,11 +795,11 @@ func (c *Collections) UpdateCollections(
// Moved folders don't cause delta results for any subfolders nested in
// them. We need to go through and update paths to handle that. We only
// update newPaths so we don't accidentally clobber previous deletes.
updatePath(newPaths, itemID, collectionPath.String())
updatePath(newPrevPaths, itemID, collectionPath.String())

found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath)
if err != nil {
return clues.Stack(err).WithClues(ictx)
return nil, clues.Stack(err).WithClues(ictx)
}

if found {
@ -801,7 +822,7 @@ func (c *Collections) UpdateCollections(
invalidPrevDelta,
nil)
if err != nil {
return clues.Stack(err).WithClues(ictx)
return nil, clues.Stack(err).WithClues(ictx)
}

col.driveName = driveName
@ -823,35 +844,38 @@ func (c *Collections) UpdateCollections(
case item.GetFile() != nil:
// Deletions are handled above so this is just moves/renames.
if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
return clues.New("file without parent ID").WithClues(ictx)
return nil, clues.New("file without parent ID").WithClues(ictx)
}

// Get the collection for this item.
parentID := ptr.Val(item.GetParentReference().GetId())
ictx = clues.Add(ictx, "parent_id", parentID)

collection, found := c.CollectionMap[driveID][parentID]
if !found {
return clues.New("item seen before parent folder").WithClues(ictx)
collection, ok := c.CollectionMap[driveID][parentID]
if !ok {
return nil, clues.New("item seen before parent folder").WithClues(ictx)
}

// Delete the file from previous collection. This will
// only kick in if the file was moved multiple times
// within a single delta query
icID, found := itemCollection[driveID][itemID]
if found {
pcollection, found := c.CollectionMap[driveID][icID]
// This will only kick in if the file was moved multiple times
// within a single delta query. We delete the file from the previous
// collection so that it doesn't appear in two places.
prevParentContainerID, ok := currPrevPaths[itemID]
if ok {
prevColl, found := c.CollectionMap[driveID][prevParentContainerID]
if !found {
return clues.New("previous collection not found").WithClues(ictx)
return nil, clues.New("previous collection not found").
With("prev_parent_container_id", prevParentContainerID).
WithClues(ictx)
}

removed := pcollection.Remove(itemID)
if !removed {
return clues.New("removing from prev collection").WithClues(ictx)
if ok := prevColl.Remove(itemID); !ok {
return nil, clues.New("removing item from prev collection").
With("prev_parent_container_id", prevParentContainerID).
WithClues(ictx)
}
}

itemCollection[driveID][itemID] = parentID
currPrevPaths[itemID] = parentID

if collection.Add(item) {
c.NumItems++
@ -872,11 +896,13 @@ func (c *Collections) UpdateCollections(
}

default:
return clues.New("item type not supported").WithClues(ictx)
el.AddRecoverable(ictx, clues.New("item is neither folder nor file").
WithClues(ictx).
Label(fault.LabelForceNoBackupCreation))
}
}

return el.Failure()
return newPrevPaths, el.Failure()
}

type dirScopeChecker interface {

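The currPrevPaths map above carries the move-within-one-delta bookkeeping: it records which container each file was last placed in during this pass, so a later move can evict the stale entry before re-adding the file. A toy, self-contained illustration of that bookkeeping with hypothetical types, not the Corso ones:

// Toy model: collections maps container ID -> set of item IDs; the caller
// seeds an entry for every container before placing files.
type toyCollection map[string]struct{}

func placeFile(
	collections map[string]toyCollection,
	currPrevPaths map[string]string,
	itemID, parentID string,
) {
	// If this file was already placed during this delta pass, evict it from
	// the previous container so it doesn't appear in two places.
	if prevParent, ok := currPrevPaths[itemID]; ok {
		delete(collections[prevParent], itemID)
	}

	currPrevPaths[itemID] = parentID
	collections[parentID][itemID] = struct{}{}
}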
@ -8,7 +8,6 @@ import (
"github.com/alcionai/clues"
"github.com/google/uuid"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
@ -136,7 +135,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath)

tests := []struct {
testCase string
name string
items []models.DriveItemable
inputFolderMap map[string]string
scope selectors.OneDriveScope
@ -146,11 +145,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedContainerCount int
expectedFileCount int
expectedSkippedCount int
expectedMetadataPaths map[string]string
expectedPrevPaths map[string]string
expectedExcludes map[string]struct{}
}{
{
testCase: "Invalid item",
name: "Invalid item",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("item", "item", testBaseDrivePath, "root", false, false, false),
@ -162,13 +161,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
"root": expectedStatePath(data.NotMovedState, ""),
},
expectedContainerCount: 1,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: map[string]struct{}{},
},
{
testCase: "Single File",
name: "Single File",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("file", "file", testBaseDrivePath, "root", true, false, false),
@ -183,13 +182,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedFileCount: 1,
expectedContainerCount: 1,
// Root folder is skipped since it's always present.
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: getDelList("file"),
},
{
testCase: "Single Folder",
name: "Single Folder",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -201,7 +200,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
"root": expectedStatePath(data.NotMovedState, ""),
"folder": expectedStatePath(data.NewState, folder),
},
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath("/folder"),
},
@ -210,7 +209,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "Single Package",
name: "Single Package",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("package", "package", testBaseDrivePath, "root", false, false, true),
@ -222,7 +221,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
"root": expectedStatePath(data.NotMovedState, ""),
"package": expectedStatePath(data.NewState, pkg),
},
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"package": expectedPath("/package"),
},
@ -231,7 +230,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
name: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -251,7 +250,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 5,
expectedFileCount: 3,
expectedContainerCount: 3,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder"),
|
||||
"package": expectedPath("/package"),
|
||||
@ -259,7 +258,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"),
|
||||
},
|
||||
{
|
||||
testCase: "contains folder selector",
|
||||
name: "contains folder selector",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -284,7 +283,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedContainerCount: 3,
|
||||
// just "folder" isn't added here because the include check is done on the
|
||||
// parent path since we only check later if something is a folder or not.
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath(folderSub),
|
||||
"folder2": expectedPath(folderSub + folder),
|
||||
@ -292,7 +291,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("fileInFolder", "fileInFolder2"),
|
||||
},
|
||||
{
|
||||
testCase: "prefix subfolder selector",
|
||||
name: "prefix subfolder selector",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -315,14 +314,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 3,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"subfolder": expectedPath(folderSub),
|
||||
"folder2": expectedPath(folderSub + folder),
|
||||
},
|
||||
expectedExcludes: getDelList("fileInFolder2"),
|
||||
},
|
||||
{
|
||||
testCase: "match subfolder selector",
|
||||
name: "match subfolder selector",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -343,13 +342,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 1,
|
||||
// No child folders for subfolder so nothing here.
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"subfolder": expectedPath(folderSub),
|
||||
},
|
||||
expectedExcludes: getDelList("fileInSubfolder"),
|
||||
},
|
||||
{
|
||||
testCase: "not moved folder tree",
|
||||
name: "not moved folder tree",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -367,7 +366,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 2,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath(folderSub),
|
||||
@ -375,7 +374,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "moved folder tree",
|
||||
name: "moved folder tree",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -393,7 +392,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 2,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath(folderSub),
|
||||
@ -401,7 +400,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "moved folder tree with file no previous",
|
||||
name: "moved folder tree with file no previous",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -418,14 +417,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder2"),
|
||||
},
|
||||
expectedExcludes: getDelList("file"),
|
||||
},
|
||||
{
|
||||
testCase: "moved folder tree with file no previous 1",
|
||||
name: "moved folder tree with file no previous 1",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -441,14 +440,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
},
|
||||
expectedExcludes: getDelList("file"),
|
||||
},
|
||||
{
|
||||
testCase: "moved folder tree and subfolder 1",
|
||||
name: "moved folder tree and subfolder 1",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -468,7 +467,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 3,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath("/subfolder"),
|
||||
@ -476,7 +475,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "moved folder tree and subfolder 2",
|
||||
name: "moved folder tree and subfolder 2",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -496,7 +495,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 3,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath("/subfolder"),
|
||||
@ -504,7 +503,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "move subfolder when moving parent",
|
||||
name: "move subfolder when moving parent",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false),
|
||||
@ -538,7 +537,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 5,
|
||||
expectedFileCount: 2,
|
||||
expectedContainerCount: 4,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder"),
|
||||
"folder2": expectedPath("/folder2"),
|
||||
@ -547,7 +546,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"),
|
||||
},
|
||||
{
|
||||
testCase: "moved folder tree multiple times",
|
||||
name: "moved folder tree multiple times",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -567,7 +566,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder2"),
|
||||
"subfolder": expectedPath("/folder2/subfolder"),
|
||||
@ -575,7 +574,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("file"),
|
||||
},
|
||||
{
|
||||
testCase: "deleted folder and package",
|
||||
name: "deleted folder and package",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"), // root is always present, but not necessary here
|
||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -596,13 +595,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 0,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 1,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "delete folder without previous",
|
||||
name: "delete folder without previous",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -618,13 +617,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 0,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 1,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "delete folder tree move subfolder",
|
||||
name: "delete folder tree move subfolder",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -645,14 +644,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 2,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"subfolder": expectedPath("/subfolder"),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "delete file",
|
||||
name: "delete file",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("item", testBaseDrivePath, "root", true, false, false),
|
||||
@ -668,13 +667,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 1,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: getDelList("item"),
|
||||
},
|
||||
{
|
||||
testCase: "item before parent errors",
|
||||
name: "item before parent errors",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false),
|
||||
@ -689,13 +688,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 0,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 1,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
expectedPrevPaths: nil,
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
|
||||
name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -716,7 +713,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedFileCount: 2,
|
||||
expectedContainerCount: 3,
|
||||
expectedSkippedCount: 1,
|
||||
expectedMetadataPaths: map[string]string{
|
||||
expectedPrevPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder"),
|
||||
"package": expectedPath("/package"),
|
||||
@ -725,26 +722,23 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
suite.Run(tt.testCase, func() {
|
||||
for _, test := range tests {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
excludes = map[string]struct{}{}
|
||||
outputFolderMap = map[string]string{}
|
||||
itemCollection = map[string]map[string]string{
|
||||
driveID: {},
|
||||
}
|
||||
errs = fault.New(true)
|
||||
excludes = map[string]struct{}{}
|
||||
currPrevPaths = map[string]string{}
|
||||
errs = fault.New(true)
|
||||
)
|
||||
|
||||
maps.Copy(outputFolderMap, tt.inputFolderMap)
|
||||
maps.Copy(currPrevPaths, test.inputFolderMap)
|
||||
|
||||
c := NewCollections(
|
||||
&itemBackupHandler{api.Drives{}, user, tt.scope},
|
||||
&itemBackupHandler{api.Drives{}, user, test.scope},
|
||||
tenant,
|
||||
user,
|
||||
nil,
|
||||
@ -752,25 +746,24 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
|
||||
c.CollectionMap[driveID] = map[string]*Collection{}
|
||||
|
||||
err := c.UpdateCollections(
|
||||
newPrevPaths, err := c.UpdateCollections(
|
||||
ctx,
|
||||
driveID,
|
||||
"General",
|
||||
tt.items,
|
||||
tt.inputFolderMap,
|
||||
outputFolderMap,
|
||||
test.items,
|
||||
test.inputFolderMap,
|
||||
currPrevPaths,
|
||||
excludes,
|
||||
itemCollection,
|
||||
false,
|
||||
errs)
|
||||
tt.expect(t, err, clues.ToCore(err))
|
||||
assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
|
||||
assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count")
|
||||
assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count")
|
||||
assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count")
|
||||
assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items")
|
||||
test.expect(t, err, clues.ToCore(err))
|
||||
assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
|
||||
assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
|
||||
assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count")
|
||||
assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count")
|
||||
assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items")
|
||||
|
||||
for id, sp := range tt.expectedCollectionIDs {
|
||||
for id, sp := range test.expectedCollectionIDs {
|
||||
if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) {
|
||||
// Skip collections we don't find so we don't get an NPE.
|
||||
continue
|
||||
@ -781,8 +774,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id)
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths")
|
||||
assert.Equal(t, tt.expectedExcludes, excludes, "exclude list")
|
||||
assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths")
|
||||
assert.Equal(t, test.expectedExcludes, excludes, "exclude list")
|
||||
})
|
||||
}
|
||||
}
@ -1300,7 +1293,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1338,7 +1332,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1415,7 +1410,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &empty, // probably will never happen with graph
DeltaLink: &empty, // probably will never happen with graph
ResetDelta: true,
},
},
},
@ -1452,7 +1448,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
NextLink: &next,
NextLink: &next,
ResetDelta: true,
},
{
Values: []models.DriveItemable{
@ -1460,7 +1457,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1502,7 +1500,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
driveID2: {
@ -1512,7 +1511,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false),
driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -1564,7 +1564,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
driveID2: {
@ -1574,7 +1575,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath2, "root", false, true, false),
driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -1632,87 +1634,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
expectedFolderPaths: nil,
expectedDelList: nil,
},
{
name: "OneDrive_OneItemPage_DeltaError",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("file", "file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
},
},
},
canUsePreviousBackup: true,
errCheck: assert.NoError,
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NotMovedState: {"file"}},
},
expectedDeltaURLs: map[string]string{
driveID1: delta,
},
expectedFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
},
},
expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
doNotMergeItems: map[string]bool{
rootFolderPath1: true,
},
},
{
name: "OneDrive_TwoItemPage_DeltaError",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("file", "file", driveBasePath1, "root", true, false, false),
},
NextLink: &next,
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
},
},
},
canUsePreviousBackup: true,
errCheck: assert.NoError,
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NotMovedState: {"file"}},
expectedPath1("/folder"): {data.NewState: {"folder", "file2"}},
},
expectedDeltaURLs: map[string]string{
driveID1: delta,
},
expectedFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
"folder": folderPath1,
},
},
expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
doNotMergeItems: map[string]bool{
rootFolderPath1: true,
folderPath1: true,
},
},
{
name: "OneDrive_TwoItemPage_NoDeltaError",
drives: []models.Driveable{drive1},
@ -1765,16 +1686,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1812,16 +1731,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder2", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1878,7 +1795,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1908,13 +1826,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
expectedSkippedCount: 2,
},
{
name: "One Drive Delta Error Deleted Folder In New Results",
name: "One Drive Deleted Folder In New Results",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
@ -1931,7 +1846,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
delItem("folder2", driveBasePath1, "root", false, true, false),
delItem("file2", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -1966,19 +1882,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
{
name: "One Drive Delta Error Random Folder Delete",
name: "One Drive Random Folder Delete",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
delItem("folder", driveBasePath1, "root", false, true, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2009,19 +1923,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
{
name: "One Drive Delta Error Random Item Delete",
name: "One Drive Random Item Delete",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2067,7 +1979,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
delItem("folder", driveBasePath1, "root", false, true, false),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -2110,7 +2023,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveRootItem("root"),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2148,7 +2062,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveRootItem("root"),
delItem("folder", driveBasePath1, "root", false, true, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2183,7 +2098,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveRootItem("root"),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2265,6 +2181,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
mbh := mock.DefaultOneDriveBH("a-user")
mbh.DrivePagerV = mockDrivePager
mbh.ItemPagerV = itemPagers
mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items)

c := NewCollections(
mbh,
@ -2491,121 +2408,6 @@ func delItem(
return item
}

func getDeltaError() error {
syncStateNotFound := "SyncStateNotFound"
me := odataerrors.NewMainError()
me.SetCode(&syncStateNotFound)

deltaError := odataerrors.NewODataError()
deltaError.SetErrorEscaped(me)

return deltaError
}
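`getDeltaError` fabricates the OData error shape that the production code treats as an expired delta token. As a rough sketch, detection amounts to matching the error code pulled off the `odataerrors.ODataError`; the real helper is `graph.IsErrInvalidDelta`, and beyond the `SyncStateNotFound` value used above the code list here is an assumption (`resyncRequired` is another code Graph delta APIs are known to return):

```go
// isInvalidDeltaCode sketches the decision of whether a delta token must be
// discarded and enumeration restarted from scratch. Hypothetical helper; the
// production check lives in graph.IsErrInvalidDelta.
func isInvalidDeltaCode(code string) bool {
	switch code {
	case "SyncStateNotFound", "resyncRequired":
		return true
	default:
		return false
	}
}
```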

func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() {
next := "next"
delta := "delta"
prevDelta := "prev-delta"

table := []struct {
name string
items []apiMock.PagerResult[models.DriveItemable]
deltaURL string
prevDeltaSuccess bool
prevDelta string
err error
}{
{
name: "delta on first run",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{DeltaLink: &delta},
},
prevDeltaSuccess: true,
prevDelta: prevDelta,
},
{
name: "empty prev delta",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{DeltaLink: &delta},
},
prevDeltaSuccess: false,
prevDelta: "",
},
{
name: "next then delta",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{NextLink: &next},
{DeltaLink: &delta},
},
prevDeltaSuccess: true,
prevDelta: prevDelta,
},
{
name: "invalid prev delta",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{Err: getDeltaError()},
{DeltaLink: &delta}, // works on retry
},
prevDelta: prevDelta,
prevDeltaSuccess: false,
},
{
name: "fail a normal delta query",
items: []apiMock.PagerResult[models.DriveItemable]{
{NextLink: &next},
{Err: assert.AnError},
},
prevDelta: prevDelta,
prevDeltaSuccess: true,
err: assert.AnError,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()

ctx, flush := tester.NewContext(t)
defer flush()

itemPager := &apiMock.DeltaPager[models.DriveItemable]{
ToReturn: test.items,
}

collectorFunc := func(
ctx context.Context,
driveID, driveName string,
driveItems []models.DriveItemable,
oldPaths map[string]string,
newPaths map[string]string,
excluded map[string]struct{},
itemCollection map[string]map[string]string,
doNotMergeItems bool,
errs *fault.Bus,
) error {
return nil
}

delta, _, _, err := collectItems(
ctx,
itemPager,
"",
"General",
collectorFunc,
map[string]string{},
test.prevDelta,
fault.New(true))

require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err))
require.Equal(t, test.deltaURL, delta.URL, "delta url")
require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset")
})
}
}

func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
driveID := "test-drive"
collCount := 3

@ -36,6 +36,7 @@ type BackupHandler interface {
GetItemPermissioner
GetItemer
NewDrivePagerer
EnumerateDriveItemsDeltaer

// PathPrefix constructs the service and category specific path prefix for
// the given values.
@ -50,7 +51,7 @@ type BackupHandler interface {

// ServiceCat returns the service and category used by this implementation.
ServiceCat() (path.ServiceType, path.CategoryType)
NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable]

// FormatDisplayPath creates a human-readable string to represent the
// provided path.
FormatDisplayPath(driveName string, parentPath *path.Builder) string
@ -79,6 +80,17 @@ type GetItemer interface {
) (models.DriveItemable, error)
}

type EnumerateDriveItemsDeltaer interface {
EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
) (
[]models.DriveItemable,
api.DeltaUpdate,
error,
)
}
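`EnumerateDriveItemsDeltaer` is the seam this commit introduces: one call yields the full item set plus the closing `api.DeltaUpdate`, replacing the per-page pager plumbing. A stub satisfying it might look like the sketch below (a test-style stand-in of my own naming, not Corso's production client or its mock):

```go
// staticEnumerator returns fixed results for every drive. Hypothetical type;
// it exists only to show the shape consumers of the interface depend on.
type staticEnumerator struct {
	items []models.DriveItemable
	du    api.DeltaUpdate
	err   error
}

func (s staticEnumerator) EnumerateDriveItemsDelta(
	ctx context.Context,
	driveID, prevDeltaLink string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
	return s.items, s.du, s.err
}
```

Because the urlCache below also consumes this interface, a stub like this is enough to exercise cache refreshes without touching Graph.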

// ---------------------------------------------------------------------------
// restore
// ---------------------------------------------------------------------------

@ -1,142 +0,0 @@
package drive

import (
"context"

"github.com/microsoftgraph/msgraph-sdk-go/models"
"golang.org/x/exp/maps"

"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// DeltaUpdate holds the results of a current delta token. It normally
// gets produced when aggregating the addition and removal of items in
// a delta-queryable folder.
// FIXME: This is same as exchange.api.DeltaUpdate
type DeltaUpdate struct {
// the deltaLink itself
URL string
// true if the old delta was marked as invalid
Reset bool
}

// itemCollector functions collect the items found in a drive
type itemCollector func(
ctx context.Context,
driveID, driveName string,
driveItems []models.DriveItemable,
oldPaths map[string]string,
newPaths map[string]string,
excluded map[string]struct{},
itemCollections map[string]map[string]string,
validPrevDelta bool,
errs *fault.Bus,
) error

// collectItems will enumerate all items in the specified drive and hand them to the
// provided `collector` method
func collectItems(
ctx context.Context,
pager api.DeltaPager[models.DriveItemable],
driveID, driveName string,
collector itemCollector,
oldPaths map[string]string,
prevDelta string,
errs *fault.Bus,
) (
DeltaUpdate,
map[string]string, // newPaths
map[string]struct{}, // excluded
error,
) {
var (
newDeltaURL = ""
newPaths = map[string]string{}
excluded = map[string]struct{}{}
invalidPrevDelta = len(prevDelta) == 0

// itemCollection is used to identify which collection a
// file belongs to. This is useful to delete a file from the
// collection it was previously in, in case it was moved to a
// different collection within the same delta query
// drive ID -> item ID -> item ID
itemCollection = map[string]map[string]string{
driveID: {},
}
)

if !invalidPrevDelta {
maps.Copy(newPaths, oldPaths)
pager.SetNextLink(prevDelta)
}

for {
// assume delta urls here, which allows single-token consumption
page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC))

if graph.IsErrInvalidDelta(err) {
logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)

invalidPrevDelta = true
newPaths = map[string]string{}

pager.Reset(ctx)

continue
}

if err != nil {
return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page")
}

vals := page.GetValue()

err = collector(
ctx,
driveID,
driveName,
vals,
oldPaths,
newPaths,
excluded,
itemCollection,
invalidPrevDelta,
errs)
if err != nil {
return DeltaUpdate{}, nil, nil, err
}

nextLink, deltaLink := api.NextAndDeltaLink(page)

if len(deltaLink) > 0 {
newDeltaURL = deltaLink
}

// Check if there are more items
if len(nextLink) == 0 {
break
}

logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink)
pager.SetNextLink(nextLink)
}

return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
}
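The deleted `collectItems` above is the classic Graph delta-paging pattern: follow `nextLink`s page by page, remember the `deltaLink` when one appears, and reset state when the previous token proves invalid. Stripped of Corso's plumbing, the control flow reduces to the sketch below; the `page` type is a hypothetical stand-in for an SDK response, not the real `api.DeltaPager` result.

```go
// page is the minimal slice of a Graph delta response this loop needs.
type page struct {
	values    []string
	nextLink  string
	deltaLink string
}

// drainDelta walks a pre-fetched sequence of pages, accumulating values
// until a page has no nextLink, and returns the last deltaLink seen. The
// real loop fetches each page lazily and handles invalid-token resets.
func drainDelta(pages []page) ([]string, string) {
	var (
		out   []string
		delta string
	)

	for _, p := range pages {
		out = append(out, p.values...)

		if p.deltaLink != "" {
			delta = p.deltaLink
		}

		if p.nextLink == "" {
			break
		}
	}

	return out, delta
}
```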

// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
func newItem(name string, folder bool) *models.DriveItem {
itemToCreate := models.NewDriveItem()
itemToCreate.SetName(&name)

if folder {
itemToCreate.SetFolder(models.NewFolder())
} else {
itemToCreate.SetFile(models.NewFile())
}

return itemToCreate
}
@ -87,13 +87,6 @@ func (h itemBackupHandler) NewDrivePager(
return h.ac.NewUserDrivePager(resourceOwner, fields)
}

func (h itemBackupHandler) NewItemPager(
driveID, link string,
fields []string,
) api.DeltaPager[models.DriveItemable] {
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
}

func (h itemBackupHandler) AugmentItemInfo(
dii details.ItemInfo,
item models.DriveItemable,
@ -139,6 +132,13 @@ func (h itemBackupHandler) IncludesDir(dir string) bool {
return h.scope.Matches(selectors.OneDriveFolder, dir)
}

func (h itemBackupHandler) EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
}

// ---------------------------------------------------------------------------
// Restore
// ---------------------------------------------------------------------------

@ -20,8 +20,6 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

@ -60,83 +58,6 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
suite.userDriveID = ptr.Val(odDrives[0].GetId())
}

// TestItemReader is an integration test that makes a few assumptions
// about the test environment
// 1) It assumes the test user has a drive
// 2) It assumes the drive has a file it can use to test `driveItemReader`
// The test checks these in below
func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
t := suite.T()

ctx, flush := tester.NewContext(t)
defer flush()

var driveItem models.DriveItemable
// This item collector tries to find "a" drive item that is a non-empty
// file to test the reader function
itemCollector := func(
_ context.Context,
_, _ string,
items []models.DriveItemable,
_ map[string]string,
_ map[string]string,
_ map[string]struct{},
_ map[string]map[string]string,
_ bool,
_ *fault.Bus,
) error {
if driveItem != nil {
return nil
}

for _, item := range items {
if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 {
driveItem = item
break
}
}

return nil
}

ip := suite.service.ac.
Drives().
NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault())

_, _, _, err := collectItems(
ctx,
ip,
suite.userDriveID,
"General",
itemCollector,
map[string]string{},
"",
fault.New(true))
require.NoError(t, err, clues.ToCore(err))

// Test Requirement 2: Need a file
require.NotEmpty(
t,
driveItem,
"no file item found for user %s drive %s",
suite.user,
suite.userDriveID)

bh := itemBackupHandler{
suite.service.ac.Drives(),
suite.user,
(&selectors.OneDriveBackup{}).Folders(selectors.Any())[0],
}

// Read data for the file
itemData, err := downloadItem(ctx, bh, driveItem)
require.NoError(t, err, clues.ToCore(err))

size, err := io.Copy(io.Discard, itemData)
require.NoError(t, err, clues.ToCore(err))
require.NotZero(t, size)
}

// TestItemWriter is an integration test for uploading data to OneDrive
// It creates a new folder with a new item and writes data to it
func (suite *ItemIntegrationSuite) TestItemWriter() {
@ -171,7 +92,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
ctx,
test.driveID,
ptr.Val(root.GetId()),
newItem(newFolderName, true),
api.NewDriveItem(newFolderName, true),
control.Copy)
require.NoError(t, err, clues.ToCore(err))
require.NotNil(t, newFolder.GetId())
@ -183,7 +104,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
ctx,
test.driveID,
ptr.Val(newFolder.GetId()),
newItem(newItemName, false),
api.NewDriveItem(newItemName, false),
control.Copy)
require.NoError(t, err, clues.ToCore(err))
require.NotNil(t, newItem.GetId())
@ -317,7 +238,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "success",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -336,7 +257,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "success, content url set instead of download url",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@content.downloadUrl": url,
})
@ -355,7 +276,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "api getter returns error",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -371,7 +292,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "download url is empty",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
return di
},
GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
@ -386,7 +307,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "malware",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -408,7 +329,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "non-2xx http response",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -457,7 +378,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead
url = "https://example.com"

itemFunc = func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})

@ -92,13 +92,6 @@ func (h libraryBackupHandler) NewDrivePager(
return h.ac.NewSiteDrivePager(resourceOwner, fields)
}

func (h libraryBackupHandler) NewItemPager(
driveID, link string,
fields []string,
) api.DeltaPager[models.DriveItemable] {
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
}

func (h libraryBackupHandler) AugmentItemInfo(
dii details.ItemInfo,
item models.DriveItemable,
@ -177,6 +170,13 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
return h.scope.Matches(selectors.SharePointLibraryFolder, dir)
}

func (h libraryBackupHandler) EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
}

// ---------------------------------------------------------------------------
// Restore
// ---------------------------------------------------------------------------

@ -1,7 +1,6 @@
package metadata

import (
"io"
"time"
)

@ -41,17 +40,3 @@ type Metadata struct {
Permissions []Permission `json:"permissions,omitempty"`
LinkShares []LinkShare `json:"linkShares,omitempty"`
}

type Item struct {
ItemID string
Data io.ReadCloser
Mod time.Time
}

// Deleted implements an interface function. However, OneDrive items are marked
// as deleted by adding them to the exclude list so this can always return
// false.
func (i *Item) Deleted() bool { return false }
func (i *Item) ID() string { return i.ItemID }
func (i *Item) ToReader() io.ReadCloser { return i.Data }
func (i *Item) ModTime() time.Time { return i.Mod }
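This `Item` type implements the item-streaming interface with a constant-false `Deleted`, since drive deletions travel on the exclude list instead of as tombstone items. A standalone sketch of how a consumer reads one follows; the local `Item` copy mirrors the struct above so the snippet compiles on its own, and the consuming interface name is assumed.

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"time"
)

// Item mirrors the metadata.Item struct above so the sketch runs standalone.
type Item struct {
	ItemID string
	Data   io.ReadCloser
	Mod    time.Time
}

func (i *Item) Deleted() bool           { return false }
func (i *Item) ID() string              { return i.ItemID }
func (i *Item) ToReader() io.ReadCloser { return i.Data }
func (i *Item) ModTime() time.Time      { return i.Mod }

func main() {
	it := &Item{
		ItemID: "item-id",
		Data:   io.NopCloser(strings.NewReader("payload")),
		Mod:    time.Now(),
	}

	// Consumers drain the reader and rely on Deleted() for merge logic.
	b, _ := io.ReadAll(it.ToReader())
	fmt.Println(it.ID(), it.Deleted(), len(b)) // item-id false 7
}
```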

@ -671,7 +671,7 @@ func createFolder(
ctx,
driveID,
parentFolderID,
newItem(folderName, true),
api.NewDriveItem(folderName, true),
control.Replace)

// ErrItemAlreadyExistsConflict can only occur for folders if the
@ -692,7 +692,7 @@ func createFolder(
ctx,
driveID,
parentFolderID,
newItem(folderName, true),
api.NewDriveItem(folderName, true),
control.Copy)
if err != nil {
return nil, clues.Wrap(err, "creating folder")
@ -733,7 +733,7 @@ func restoreFile(
}

var (
item = newItem(name, false)
item = api.NewDriveItem(name, false)
collisionKey = api.DriveItemCollisionKey(item)
collision api.DriveItemIDType
shouldDeleteOriginal bool

@ -12,7 +12,6 @@ import (
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

const (
@ -47,7 +46,7 @@ type urlCache struct {
refreshMu sync.Mutex
deltaQueryCount int

itemPager api.DeltaPager[models.DriveItemable]
edid EnumerateDriveItemsDeltaer

errs *fault.Bus
}
@ -56,13 +55,10 @@ func newURLCache(
func newURLCache(
driveID, prevDelta string,
refreshInterval time.Duration,
itemPager api.DeltaPager[models.DriveItemable],
edid EnumerateDriveItemsDeltaer,
errs *fault.Bus,
) (*urlCache, error) {
err := validateCacheParams(
driveID,
refreshInterval,
itemPager)
err := validateCacheParams(driveID, refreshInterval, edid)
if err != nil {
return nil, clues.Wrap(err, "cache params")
}
@ -71,9 +67,9 @@ func newURLCache(
idToProps: make(map[string]itemProps),
lastRefreshTime: time.Time{},
driveID: driveID,
edid: edid,
prevDelta: prevDelta,
refreshInterval: refreshInterval,
itemPager: itemPager,
errs: errs,
},
nil
@ -83,7 +79,7 @@ func newURLCache(
func validateCacheParams(
driveID string,
refreshInterval time.Duration,
itemPager api.DeltaPager[models.DriveItemable],
edid EnumerateDriveItemsDeltaer,
) error {
if len(driveID) == 0 {
return clues.New("drive id is empty")
@ -93,8 +89,8 @@ func validateCacheParams(
return clues.New("invalid refresh interval")
}

if itemPager == nil {
return clues.New("nil item pager")
if edid == nil {
return clues.New("nil item enumerator")
}

return nil
@ -160,44 +156,23 @@ func (uc *urlCache) refreshCache(
// Issue a delta query to graph
logger.Ctx(ctx).Info("refreshing url cache")

err := uc.deltaQuery(ctx)
items, du, err := uc.edid.EnumerateDriveItemsDelta(ctx, uc.driveID, uc.prevDelta)
if err != nil {
// clear cache
uc.idToProps = make(map[string]itemProps)
return clues.Stack(err)
}

return err
uc.deltaQueryCount++

if err := uc.updateCache(ctx, items, uc.errs); err != nil {
return clues.Stack(err)
}

logger.Ctx(ctx).Info("url cache refreshed")

// Update last refresh time
uc.lastRefreshTime = time.Now()

return nil
}
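With enumeration delegated to `EnumerateDriveItemsDeltaer`, `refreshCache` now owns the bookkeeping itself: bump the query counter, fold the items into the cache, roll `prevDelta` forward, and stamp the refresh time. The staleness gate that pairs with it presumably reduces to a single comparison; a sketch under that assumption (the real check would run while holding the cache mutex, and this helper name is hypothetical):

```go
// needsRefresh reports whether the cached URLs have outlived the configured
// interval, meaning the next read should trigger refreshCache.
func (uc *urlCache) needsRefresh() bool {
	return time.Since(uc.lastRefreshTime) > uc.refreshInterval
}
```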
|
||||
|
||||
// deltaQuery performs a delta query on the drive and update the cache
|
||||
func (uc *urlCache) deltaQuery(
|
||||
ctx context.Context,
|
||||
) error {
|
||||
logger.Ctx(ctx).Debug("starting delta query")
|
||||
// Reset item pager to remove any previous state
|
||||
uc.itemPager.Reset(ctx)
|
||||
|
||||
_, _, _, err := collectItems(
|
||||
ctx,
|
||||
uc.itemPager,
|
||||
uc.driveID,
|
||||
"",
|
||||
uc.updateCache,
|
||||
map[string]string{},
|
||||
uc.prevDelta,
|
||||
uc.errs)
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "delta query")
|
||||
}
|
||||
|
||||
uc.deltaQueryCount++
|
||||
uc.prevDelta = du.URL
|
||||
|
||||
return nil
|
||||
}
@ -224,13 +199,7 @@ func (uc *urlCache) readCache(
// It assumes that cacheMu is held by caller in write mode
func (uc *urlCache) updateCache(
ctx context.Context,
_, _ string,
items []models.DriveItemable,
_ map[string]string,
_ map[string]string,
_ map[string]struct{},
_ map[string]map[string]string,
_ bool,
errs *fault.Bus,
) error {
el := errs.Local()

@ -1,7 +1,6 @@
package drive

import (
"context"
"errors"
"io"
"math/rand"
@ -18,15 +17,19 @@ import (
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/services/m365/api"
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
)

// ---------------------------------------------------------------------------
// integration
// ---------------------------------------------------------------------------

type URLCacheIntegrationSuite struct {
tester.Suite
ac api.Client
@ -68,11 +71,10 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
// url cache
func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
var (
t = suite.T()
ac = suite.ac.Drives()
driveID = suite.driveID
newFolderName = testdata.DefaultRestoreConfig("folder").Location
driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault())
t = suite.T()
ac = suite.ac.Drives()
driveID = suite.driveID
newFolderName = testdata.DefaultRestoreConfig("folder").Location
)

ctx, flush := tester.NewContext(t)
@ -82,11 +84,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
root, err := ac.GetRootFolder(ctx, driveID)
require.NoError(t, err, clues.ToCore(err))

newFolder, err := ac.Drives().PostItemInContainer(
newFolder, err := ac.PostItemInContainer(
ctx,
driveID,
ptr.Val(root.GetId()),
newItem(newFolderName, true),
api.NewDriveItem(newFolderName, true),
control.Copy)
require.NoError(t, err, clues.ToCore(err))

@ -94,33 +96,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {

nfid := ptr.Val(newFolder.GetId())

collectorFunc := func(
context.Context,
string,
string,
[]models.DriveItemable,
map[string]string,
map[string]string,
map[string]struct{},
map[string]map[string]string,
bool,
*fault.Bus,
) error {
return nil
}

// Get the previous delta to feed into url cache
prevDelta, _, _, err := collectItems(
ctx,
suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()),
suite.driveID,
"drive-name",
collectorFunc,
map[string]string{},
"",
fault.New(true))
_, du, err := ac.EnumerateDriveItemsDelta(ctx, suite.driveID, "")
require.NoError(t, err, clues.ToCore(err))
require.NotNil(t, prevDelta.URL)
require.NotEmpty(t, du.URL)

// Create a bunch of files in the new folder
var items []models.DriveItemable
@ -128,11 +107,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
for i := 0; i < 5; i++ {
newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)

item, err := ac.Drives().PostItemInContainer(
item, err := ac.PostItemInContainer(
ctx,
driveID,
nfid,
newItem(newItemName, false),
api.NewDriveItem(newItemName, false),
control.Copy)
require.NoError(t, err, clues.ToCore(err))

@ -142,9 +121,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
// Create a new URL cache with a long TTL
uc, err := newURLCache(
suite.driveID,
prevDelta.URL,
du.URL,
1*time.Hour,
driveItemPager,
suite.ac.Drives(),
fault.New(true))
require.NoError(t, err, clues.ToCore(err))

@ -195,6 +174,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
require.Equal(t, 1, uc.deltaQueryCount)
}

// ---------------------------------------------------------------------------
// unit
// ---------------------------------------------------------------------------

type URLCacheUnitSuite struct {
tester.Suite
}
@ -205,27 +188,20 @@ func TestURLCacheUnitSuite(t *testing.T) {

func (suite *URLCacheUnitSuite) TestGetItemProperties() {
deltaString := "delta"
next := "next"
driveID := "drive1"

table := []struct {
name string
pagerResult map[string][]apiMock.PagerResult[models.DriveItemable]
pagerItems map[string][]models.DriveItemable
pagerErr map[string]error
expectedItemProps map[string]itemProps
expectedErr require.ErrorAssertionFunc
cacheAssert func(*urlCache, time.Time)
}{
{
name: "single item in cache",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
},
DeltaLink: &deltaString,
},
},
pagerItems: map[string][]models.DriveItemable{
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
},
expectedItemProps: map[string]itemProps{
"1": {
@ -242,18 +218,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "multiple items in cache",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
},
},
expectedItemProps: map[string]itemProps{
@ -287,18 +258,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "duplicate items with potentially new urls",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("1", "file1", "root", "root", "https://test1.com", false),
fileItem("2", "file2", "root", "root", "https://test2.com", false),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("1", "file1", "root", "root", "https://test1.com", false),
fileItem("2", "file2", "root", "root", "https://test2.com", false),
},
},
expectedItemProps: map[string]itemProps{
@ -324,16 +290,11 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "deleted items",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
},
},
expectedItemProps: map[string]itemProps{
@ -355,15 +316,8 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "item not found in cache",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
},
DeltaLink: &deltaString,
},
},
pagerItems: map[string][]models.DriveItemable{
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
},
expectedItemProps: map[string]itemProps{
"2": {},
@ -376,23 +330,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
},
{
name: "multi-page delta query error",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
},
NextLink: &next,
},
{
Values: []models.DriveItemable{
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
},
DeltaLink: &deltaString,
Err: errors.New("delta query error"),
},
},
name: "delta query error",
pagerItems: map[string][]models.DriveItemable{},
pagerErr: map[string]error{
driveID: errors.New("delta query error"),
},
expectedItemProps: map[string]itemProps{
"1": {},
@ -408,15 +349,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {

{
name: "folder item",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
driveItem("2", "folder2", "root", "root", false, true, false),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
driveItem("2", "folder2", "root", "root", false, true, false),
},
},
expectedItemProps: map[string]itemProps{
@ -437,15 +373,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
ctx, flush := tester.NewContext(t)
defer flush()

itemPager := &apiMock.DeltaPager[models.DriveItemable]{
ToReturn: test.pagerResult[driveID],
medi := mock.EnumeratesDriveItemsDelta{
Items: test.pagerItems,
Err: test.pagerErr,
DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}},
}

cache, err := newURLCache(
driveID,
"",
1*time.Hour,
itemPager,
&medi,
fault.New(true))

require.NoError(suite.T(), err, clues.ToCore(err))
@ -480,15 +418,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {

// Test needsRefresh
func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
driveID := "drive1"
t := suite.T()
refreshInterval := 1 * time.Second
var (
t = suite.T()
driveID = "drive1"
refreshInterval = 1 * time.Second
)

cache, err := newURLCache(
driveID,
"",
refreshInterval,
&apiMock.DeltaPager[models.DriveItemable]{},
&mock.EnumeratesDriveItemsDelta{},
fault.New(true))

require.NoError(t, err, clues.ToCore(err))
@ -510,14 +450,12 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
require.False(t, cache.needsRefresh())
}

// Test newURLCache
func (suite *URLCacheUnitSuite) TestNewURLCache() {
// table driven tests
table := []struct {
name string
driveID string
refreshInt time.Duration
itemPager api.DeltaPager[models.DriveItemable]
itemPager EnumerateDriveItemsDeltaer
errors *fault.Bus
expectedErr require.ErrorAssertionFunc
}{
@ -525,7 +463,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
name: "invalid driveID",
driveID: "",
refreshInt: 1 * time.Hour,
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
itemPager: &mock.EnumeratesDriveItemsDelta{},
errors: fault.New(true),
expectedErr: require.Error,
},
@ -533,12 +471,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
name: "invalid refresh interval",
driveID: "drive1",
refreshInt: 100 * time.Millisecond,
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
itemPager: &mock.EnumeratesDriveItemsDelta{},
errors: fault.New(true),
expectedErr: require.Error,
},
{
name: "invalid itemPager",
name: "invalid item enumerator",
driveID: "drive1",
refreshInt: 1 * time.Hour,
itemPager: nil,
@ -549,7 +487,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
name: "valid",
driveID: "drive1",
refreshInt: 1 * time.Hour,
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
itemPager: &mock.EnumeratesDriveItemsDelta{},
errors: fault.New(true),
expectedErr: require.NoError,
},

@ -2,7 +2,6 @@ package groups

import (
"context"
"fmt"
"testing"
"time"

@ -527,8 +526,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() {

require.NotEmpty(t, c.FullPath().Folder(false))

fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false))

// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
// interface.
if !assert.Implements(t, (*data.LocationPather)(nil), c) {
@ -537,8 +534,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() {

loc := c.(data.LocationPather).LocationPath().String()

fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String())

require.NotEmpty(t, loc)

delete(test.channelNames, loc)

@ -4,7 +4,6 @@ import (
"bytes"
"context"
"io"
"time"

"github.com/alcionai/clues"
"github.com/microsoft/kiota-abstractions-go/serialization"
@ -40,12 +39,7 @@ const (
Pages DataCategory = 2
)

var (
_ data.BackupCollection = &Collection{}
_ data.Item = &Item{}
_ data.ItemInfo = &Item{}
_ data.ItemModTime = &Item{}
)
var _ data.BackupCollection = &Collection{}

// Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
// by the oneDrive.Collection as the calls are identical for populating the Collection
@ -120,43 +114,6 @@ func (sc *Collection) Items(
return sc.data
}

type Item struct {
id string
data io.ReadCloser
info *details.SharePointInfo
modTime time.Time

// true if the item was marked by graph as deleted.
deleted bool
}

func NewItem(name string, d io.ReadCloser) *Item {
return &Item{
id: name,
data: d,
}
}

func (sd *Item) ID() string {
return sd.id
}

func (sd *Item) ToReader() io.ReadCloser {
return sd.data
}

func (sd Item) Deleted() bool {
return sd.deleted
}

func (sd *Item) Info() (details.ItemInfo, error) {
return details.ItemInfo{SharePoint: sd.info}, nil
}

func (sd *Item) ModTime() time.Time {
return sd.modTime
}

func (sc *Collection) finishPopulation(
ctx context.Context,
metrics support.CollectionMetrics,
@ -251,20 +208,13 @@ func (sc *Collection) retrieveLists(
size := int64(len(byteArray))

if size > 0 {
t := time.Now()
if t1 := lst.GetLastModifiedDateTime(); t1 != nil {
t = *t1
}

metrics.Bytes += size

metrics.Successes++
sc.data <- &Item{
id: ptr.Val(lst.GetId()),
data: io.NopCloser(bytes.NewReader(byteArray)),
info: ListToSPInfo(lst, size),
modTime: t,
}
sc.data <- data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
ptr.Val(lst.GetId()),
details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})

progress <- struct{}{}
}
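
Since retrieveLists (and retrievePages below) now emit data.NewPrefetchedItem values, consumers only see the generic data.Item surface. A hedged sketch of draining a collection the way the tests in this commit do, using only calls that appear in this diff:

errs := fault.New(true)

for item := range sc.Items(ctx, errs) {
	body, err := io.ReadAll(item.ToReader())
	if err != nil {
		errs.AddRecoverable(ctx, err) // record and continue, per the fault package below
		continue
	}

	_ = body      // persist or compare the payload
	_ = item.ID() // stable identifier supplied at construction
}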
@ -322,12 +272,10 @@ func (sc *Collection) retrievePages(
if size > 0 {
metrics.Bytes += size
metrics.Successes++
sc.data <- &Item{
id: ptr.Val(pg.GetId()),
data: io.NopCloser(bytes.NewReader(byteArray)),
info: pageToSPInfo(pg, root, size),
modTime: ptr.OrNow(pg.GetLastModifiedDateTime()),
}
sc.data <- data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
ptr.Val(pg.GetId()),
details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})

progress <- struct{}{}
}

@ -19,6 +19,7 @@ import (
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
@ -58,21 +59,6 @@ func TestSharePointCollectionSuite(t *testing.T) {
})
}

func (suite *SharePointCollectionSuite) TestCollection_Item_Read() {
t := suite.T()
m := []byte("test message")
name := "aFile"
sc := &Item{
id: name,
data: io.NopCloser(bytes.NewReader(m)),
}
readData, err := io.ReadAll(sc.ToReader())
require.NoError(t, err, clues.ToCore(err))

assert.Equal(t, name, sc.id)
assert.Equal(t, readData, m)
}

// TestCollection_Items tests basic functionality to create
// a SharePoint collection and to use the data stream channel.
func (suite *SharePointCollectionSuite) TestCollection_Items() {
@ -88,7 +74,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
name, itemName string
scope selectors.SharePointScope
getDir func(t *testing.T) path.Path
getItem func(t *testing.T, itemName string) *Item
getItem func(t *testing.T, itemName string) data.Item
}{
{
name: "List",
@ -106,7 +92,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {

return dir
},
getItem: func(t *testing.T, name string) *Item {
getItem: func(t *testing.T, name string) data.Item {
ow := kioser.NewJsonSerializationWriter()
listing := spMock.ListDefault(name)
listing.SetDisplayName(&name)
@ -117,11 +103,10 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
byteArray, err := ow.GetSerializedContent()
require.NoError(t, err, clues.ToCore(err))

data := &Item{
id: name,
data: io.NopCloser(bytes.NewReader(byteArray)),
info: ListToSPInfo(listing, int64(len(byteArray))),
}
data := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
name,
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})

return data
},
@ -142,16 +127,15 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {

return dir
},
getItem: func(t *testing.T, itemName string) *Item {
getItem: func(t *testing.T, itemName string) data.Item {
byteArray := spMock.Page(itemName)
page, err := betaAPI.CreatePageFromBytes(byteArray)
require.NoError(t, err, clues.ToCore(err))

data := &Item{
id: itemName,
data: io.NopCloser(bytes.NewReader(byteArray)),
info: betaAPI.PageInfo(page, int64(len(byteArray))),
}
data := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
itemName,
details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))})

return data
},
@ -210,11 +194,10 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
byteArray, err := service.Serialize(listing)
require.NoError(t, err, clues.ToCore(err))

listData := &Item{
id: testName,
data: io.NopCloser(bytes.NewReader(byteArray)),
info: ListToSPInfo(listing, int64(len(byteArray))),
}
listData := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
testName,
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})

destName := testdata.DefaultRestoreConfig("").Location


@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"io"
"time"

"github.com/alcionai/clues"

@ -16,7 +17,7 @@ import (

var (
_ data.BackupCollection = &MetadataCollection{}
_ data.Item = &MetadataItem{}
_ data.Item = &metadataItem{}
)

// MetadataCollection is a simple collection that assumes all items to be
@ -24,7 +25,7 @@ var (
// created. This collection has no logic for lazily fetching item data.
type MetadataCollection struct {
fullPath path.Path
items []MetadataItem
items []metadataItem
statusUpdater support.StatusUpdater
}

@ -40,23 +41,29 @@ func NewMetadataEntry(fileName string, mData any) MetadataCollectionEntry {
return MetadataCollectionEntry{fileName, mData}
}

func (mce MetadataCollectionEntry) toMetadataItem() (MetadataItem, error) {
func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) {
if len(mce.fileName) == 0 {
return MetadataItem{}, clues.New("missing metadata filename")
return metadataItem{}, clues.New("missing metadata filename")
}

if mce.data == nil {
return MetadataItem{}, clues.New("missing metadata")
return metadataItem{}, clues.New("missing metadata")
}

buf := &bytes.Buffer{}
encoder := json.NewEncoder(buf)

if err := encoder.Encode(mce.data); err != nil {
return MetadataItem{}, clues.Wrap(err, "serializing metadata")
return metadataItem{}, clues.Wrap(err, "serializing metadata")
}

return NewMetadataItem(mce.fileName, buf.Bytes()), nil
return metadataItem{
Item: data.NewUnindexedPrefetchedItem(
io.NopCloser(buf),
mce.fileName,
time.Now()),
size: int64(buf.Len()),
}, nil
}

// MakeMetadataCollection creates a metadata collection that has a file
@ -71,7 +78,7 @@ func MakeMetadataCollection(
return nil, nil
}

items := make([]MetadataItem, 0, len(metadata))
items := make([]metadataItem, 0, len(metadata))

for _, md := range metadata {
item, err := md.toMetadataItem()
@ -89,7 +96,7 @@ func MakeMetadataCollection(

func NewMetadataCollection(
p path.Path,
items []MetadataItem,
items []metadataItem,
statusUpdater support.StatusUpdater,
) *MetadataCollection {
return &MetadataCollection{
@ -148,7 +155,7 @@ func (md MetadataCollection) Items(
defer close(res)

for _, item := range md.items {
totalBytes += int64(len(item.data))
totalBytes += item.size
res <- item
}
}()
@ -156,36 +163,7 @@ func (md MetadataCollection) Items(
return res
}

// MetadataItem is an in-memory data.Item implementation. MetadataItem does
// not implement additional interfaces like data.ItemInfo, so it should only
// be used for items with a small amount of content that don't need to be added
// to backup details.
//
// Currently the expected use-case for this struct are storing metadata for a
// backup like delta tokens or a mapping of container IDs to container paths.
type MetadataItem struct {
// uuid is an ID that can be used to refer to the item.
uuid string
// data is a buffer of data that the item refers to.
data []byte
}

func NewMetadataItem(uuid string, itemData []byte) MetadataItem {
return MetadataItem{
uuid: uuid,
data: itemData,
}
}

func (mi MetadataItem) ID() string {
return mi.uuid
}

// TODO(ashmrtn): Fill in once we know how to handle this.
func (mi MetadataItem) Deleted() bool {
return false
}

func (mi MetadataItem) ToReader() io.ReadCloser {
return io.NopCloser(bytes.NewReader(mi.data))
type metadataItem struct {
data.Item
size int64
}
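
metadataItem is now just an embedded data.Item plus a byte count, so constructing one mirrors toMetadataItem above. A small sketch with an assumed payload and file name:

payload := []byte(`{"delta":"token"}`) // illustrative content

mi := metadataItem{
	Item: data.NewUnindexedPrefetchedItem(
		io.NopCloser(bytes.NewReader(payload)),
		"previousdelta.json", // file name, assumed for the example
		time.Now()),
	size: int64(len(payload)),
}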

@ -1,9 +1,11 @@
package graph

import (
"bytes"
"encoding/json"
"io"
"testing"
"time"

"github.com/alcionai/clues"
"github.com/google/uuid"
@ -11,6 +13,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/fault"
@ -63,10 +66,18 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
len(itemData),
"Requires same number of items and data")

items := []MetadataItem{}
items := []metadataItem{}

for i := 0; i < len(itemNames); i++ {
items = append(items, NewMetadataItem(itemNames[i], itemData[i]))
items = append(
items,
metadataItem{
Item: data.NewUnindexedPrefetchedItem(
io.NopCloser(bytes.NewReader(itemData[i])),
itemNames[i],
time.Time{}),
size: int64(len(itemData[i])),
})
}

p, err := path.Build(

@ -751,10 +751,6 @@ func compareDriveItem(
}

if isMeta {
var itemType *metadata.Item

assert.IsType(t, itemType, item)

var (
itemMeta metadata.Metadata
expectedMeta metadata.Metadata

@ -55,7 +55,10 @@ func ProduceBackupCollections(
"group_id", clues.Hide(bpc.ProtectedResource.ID()),
"group_name", clues.Hide(bpc.ProtectedResource.Name()))

group, err := ac.Groups().GetByID(ctx, bpc.ProtectedResource.ID())
group, err := ac.Groups().GetByID(
ctx,
bpc.ProtectedResource.ID(),
api.CallConfig{})
if err != nil {
return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx)
}

@ -7,18 +7,15 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/pkg/filters"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type getByIDer interface {
GetByID(ctx context.Context, identifier string) (models.Groupable, error)
}

func IsServiceEnabled(
ctx context.Context,
gbi getByIDer,
gbi api.GetByIDer[models.Groupable],
resource string,
) (bool, error) {
resp, err := gbi.GetByID(ctx, resource)
resp, err := gbi.GetByID(ctx, resource, api.CallConfig{})
if err != nil {
return false, clues.Wrap(err, "getting group").WithClues(ctx)
}

@ -12,6 +12,7 @@ import (

"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type EnabledUnitSuite struct {
@ -22,14 +23,18 @@ func TestEnabledUnitSuite(t *testing.T) {
suite.Run(t, &EnabledUnitSuite{Suite: tester.NewUnitSuite(t)})
}

var _ getByIDer = mockGBI{}
var _ api.GetByIDer[models.Groupable] = mockGBI{}

type mockGBI struct {
group models.Groupable
err error
}

func (m mockGBI) GetByID(ctx context.Context, identifier string) (models.Groupable, error) {
func (m mockGBI) GetByID(
ctx context.Context,
identifier string,
_ api.CallConfig,
) (models.Groupable, error) {
return m.group, m.err
}

@ -56,13 +61,13 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {

table := []struct {
name string
mock func(context.Context) getByIDer
mock func(context.Context) api.GetByIDer[models.Groupable]
expect assert.BoolAssertionFunc
expectErr assert.ErrorAssertionFunc
}{
{
name: "ok",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
group: unified,
}
@ -72,7 +77,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
},
{
name: "non-unified group",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
group: nonUnified,
}
@ -82,7 +87,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
},
{
name: "group not found",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
err: graph.Stack(ctx, odErrMsg(string(graph.RequestResourceNotFound), "message")),
}
@ -92,7 +97,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
},
{
name: "arbitrary error",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
err: assert.AnError,
}

@ -8,11 +8,13 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/internal/common/ptr"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
)

// ---------------------------------------------------------------------------
@ -22,6 +24,8 @@ import (
type BackupHandler struct {
ItemInfo details.ItemInfo

DriveItemEnumeration EnumeratesDriveItemsDelta

GI GetsItem
GIP GetsItemPermission

@ -55,6 +59,7 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler {
OneDrive: &details.OneDriveInfo{},
Extension: &details.ExtensionData{},
},
DriveItemEnumeration: EnumeratesDriveItemsDelta{},
GI: GetsItem{Err: clues.New("not defined")},
GIP: GetsItemPermission{Err: clues.New("not defined")},
PathPrefixFn: defaultOneDrivePathPrefixer,
@ -124,10 +129,6 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl
return h.DrivePagerV
}

func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] {
return h.ItemPagerV[driveID]
}

func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string {
return "/" + pb.String()
}
@ -152,6 +153,13 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R
return h.GetResps[c], h.GetErrs[c]
}

func (h BackupHandler) EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
return h.DriveItemEnumeration.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
}

func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) {
return h.GI.GetItem(ctx, "", "")
}
@ -254,6 +262,65 @@ func (m GetsItem) GetItem(
return m.Item, m.Err
}

// ---------------------------------------------------------------------------
// Enumerates Drive Items
// ---------------------------------------------------------------------------

type EnumeratesDriveItemsDelta struct {
Items map[string][]models.DriveItemable
DeltaUpdate map[string]api.DeltaUpdate
Err map[string]error
}

func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta(
_ context.Context,
driveID, _ string,
) (
[]models.DriveItemable,
api.DeltaUpdate,
error,
) {
return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID]
}
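
Because every field of the mock is keyed by drive ID, one instance can stand in for multi-drive enumerations. A usage sketch with illustrative values:

medi := EnumeratesDriveItemsDelta{
	Items: map[string][]models.DriveItemable{
		"drive1": {}, // fill with fileItem/driveItem helpers as the url cache tests do
	},
	DeltaUpdate: map[string]api.DeltaUpdate{"drive1": {URL: "delta"}},
	Err:         map[string]error{"drive1": nil},
}

items, du, err := medi.EnumerateDriveItemsDelta(ctx, "drive1", "")
// items and du echo the maps above; err is nil for drive1.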

func PagerResultToEDID(
m map[string][]apiMock.PagerResult[models.DriveItemable],
) EnumeratesDriveItemsDelta {
edi := EnumeratesDriveItemsDelta{
Items: map[string][]models.DriveItemable{},
DeltaUpdate: map[string]api.DeltaUpdate{},
Err: map[string]error{},
}

for driveID, results := range m {
var (
err error
items = []models.DriveItemable{}
deltaUpdate api.DeltaUpdate
)

for _, pr := range results {
items = append(items, pr.Values...)

if pr.DeltaLink != nil {
deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)}
}

if pr.Err != nil {
err = pr.Err
}

deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta
}

edi.Items[driveID] = items
edi.Err[driveID] = err
edi.DeltaUpdate[driveID] = deltaUpdate
}

return edi
}
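
PagerResultToEDID bridges the old page-oriented fixtures to the new per-drive mock: values concatenate across pages, the last DeltaLink wins, any page error is kept, and ResetDelta is sticky. A migration sketch reusing the deltaString and next variables from the unit tests above:

old := map[string][]apiMock.PagerResult[models.DriveItemable]{
	"drive1": {
		{Values: []models.DriveItemable{ /* page one */ }, NextLink: &next},
		{Values: []models.DriveItemable{ /* page two */ }, DeltaLink: &deltaString},
	},
}

edi := PagerResultToEDID(old)
// edi.Items["drive1"] now holds both pages' values and
// edi.DeltaUpdate["drive1"].URL == deltaString.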

// ---------------------------------------------------------------------------
// Get Item Permissioner
// ---------------------------------------------------------------------------

@ -4,13 +4,14 @@ import (
"bytes"
"io"
"testing"
"time"

"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/m365/collection/site"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/service/sharepoint/api"
spMock "github.com/alcionai/corso/src/internal/m365/service/sharepoint/mock"
@ -108,9 +109,10 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
//nolint:lll
byteArray := spMock.Page("Byte Test")

pageData := site.NewItem(
pageData := data.NewUnindexedPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
testName,
io.NopCloser(bytes.NewReader(byteArray)))
time.Now())

info, err := api.RestoreSitePage(
ctx,

@ -90,12 +90,9 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {

var (
paths = map[string]string{}
newPaths = map[string]string{}
currPaths = map[string]string{}
excluded = map[string]struct{}{}
itemColls = map[string]map[string]string{
driveID: {},
}
collMap = map[string]map[string]*drive.Collection{
collMap = map[string]map[string]*drive.Collection{
driveID: {},
}
)
@ -109,15 +106,14 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {

c.CollectionMap = collMap

err := c.UpdateCollections(
_, err := c.UpdateCollections(
ctx,
driveID,
"General",
test.items,
paths,
newPaths,
currPaths,
excluded,
itemColls,
true,
fault.New(true))


@ -6,6 +6,7 @@ import (
"bytes"
"context"
"io"
"time"

"github.com/alcionai/clues"

@ -128,7 +129,7 @@ type streamCollection struct {
// folderPath indicates what level in the hierarchy this collection
// represents
folderPath path.Path
item *streamItem
item data.Item
}

func (dc *streamCollection) FullPath() path.Path {
@ -157,27 +158,6 @@ func (dc *streamCollection) Items(context.Context, *fault.Bus) <-chan data.Item
return items
}

// ---------------------------------------------------------------------------
// item
// ---------------------------------------------------------------------------

type streamItem struct {
name string
data []byte
}

func (di *streamItem) ID() string {
return di.name
}

func (di *streamItem) ToReader() io.ReadCloser {
return io.NopCloser(bytes.NewReader(di.data))
}

func (di *streamItem) Deleted() bool {
return false
}

// ---------------------------------------------------------------------------
// common reader/writer/deleter
// ---------------------------------------------------------------------------
@ -204,10 +184,10 @@ func collect(

dc := streamCollection{
folderPath: p,
item: &streamItem{
name: col.itemName,
data: bs,
},
item: data.NewUnindexedPrefetchedItem(
io.NopCloser(bytes.NewReader(bs)),
col.itemName,
time.Now()),
}

return &dc, nil

@ -384,20 +384,20 @@ func (pec printableErrCore) Values() []string {
// funcs, and the function that spawned the local bus should always
// return `local.Failure()` to ensure that hard failures are propagated
// back upstream.
func (e *Bus) Local() *localBus {
return &localBus{
func (e *Bus) Local() *LocalBus {
return &LocalBus{
mu: &sync.Mutex{},
bus: e,
}
}

type localBus struct {
type LocalBus struct {
mu *sync.Mutex
bus *Bus
current error
}

func (e *localBus) AddRecoverable(ctx context.Context, err error) {
func (e *LocalBus) AddRecoverable(ctx context.Context, err error) {
if err == nil {
return
}
@ -422,7 +422,7 @@ func (e *localBus) AddRecoverable(ctx context.Context, err error) {
// 2. Skipping avoids a permanent and consistent failure. If
// the underlying reason is transient or otherwise recoverable,
// the item should not be skipped.
func (e *localBus) AddSkip(ctx context.Context, s *Skipped) {
func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) {
if s == nil {
return
}
@ -437,7 +437,7 @@ func (e *localBus) AddSkip(ctx context.Context, s *Skipped) {
// It does not return the underlying bus.Failure(), only the failure
// that was recorded within the local bus instance. This error should
// get returned by any func which created a local bus.
func (e *localBus) Failure() error {
func (e *LocalBus) Failure() error {
return e.current
}
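
Exporting LocalBus does not change the contract spelled out in the comments above: spawn a local bus per worker, record recoverable errors on it, and return its Failure() upstream. A sketch of that call shape, where tasks and run are stand-ins for caller code:

func doWork(ctx context.Context, errs *fault.Bus, tasks []string) error {
	el := errs.Local()

	for _, task := range tasks {
		if err := run(ctx, task); err != nil { // run is hypothetical
			el.AddRecoverable(ctx, err)
		}
	}

	// hard failures recorded on the local bus propagate to the caller.
	return el.Failure()
}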


@ -187,6 +187,8 @@ func (r *repository) Initialize(
}
}()

observe.Message(ctx, "Initializing repository")

kopiaRef := kopia.NewConn(r.Storage)
if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts); err != nil {
// replace common internal errors so that sdk users can check results with errors.Is()
@ -237,8 +239,7 @@ func (r *repository) Connect(ctx context.Context) (err error) {
}
}()

progressBar := observe.MessageWithCompletion(ctx, "Connecting to repository")
defer close(progressBar)
observe.Message(ctx, "Connecting to repository")

kopiaRef := kopia.NewConn(r.Storage)
if err := kopiaRef.Connect(ctx, r.Opts.Repo); err != nil {

@ -697,7 +697,7 @@ func (s ExchangeScope) IncludesCategory(cat exchangeCategory) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s ExchangeScope) IsAny(cat exchangeCategory) bool {
return isAnyTarget(s, cat)
return IsAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope

@ -699,7 +699,7 @@ func (s GroupsScope) IncludesCategory(cat groupsCategory) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s GroupsScope) IsAny(cat groupsCategory) bool {
return isAnyTarget(s, cat)
return IsAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope

@ -484,7 +484,7 @@ func (s OneDriveScope) Matches(cat oneDriveCategory, target string) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s OneDriveScope) IsAny(cat oneDriveCategory) bool {
return isAnyTarget(s, cat)
return IsAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope

@ -694,7 +694,7 @@ func matchesPathValues[T scopeT, C categoryT](
return false
}

if isAnyTarget(sc, cc) {
if IsAnyTarget(sc, cc) {
// continue, not return: all path keys must match the entry to succeed
continue
}
@ -795,7 +795,7 @@ func isNoneTarget[T scopeT, C categoryT](s T, cat C) bool {

// returns true if the category is included in the scope's category type,
// and the value is set to Any().
func isAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
func IsAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
if !typeAndCategoryMatches(cat, s.categorizer()) {
return false
}

@ -125,14 +125,14 @@ func (suite *SelectorScopesSuite) TestGetCatValue() {
func (suite *SelectorScopesSuite) TestIsAnyTarget() {
t := suite.T()
stub := stubScope("")
assert.True(t, isAnyTarget(stub, rootCatStub))
assert.True(t, isAnyTarget(stub, leafCatStub))
assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))
assert.True(t, IsAnyTarget(stub, rootCatStub))
assert.True(t, IsAnyTarget(stub, leafCatStub))
assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))

stub = stubScope("none")
assert.False(t, isAnyTarget(stub, rootCatStub))
assert.False(t, isAnyTarget(stub, leafCatStub))
assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))
assert.False(t, IsAnyTarget(stub, rootCatStub))
assert.False(t, IsAnyTarget(stub, leafCatStub))
assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))
}

var reduceTestTable = []struct {

@ -625,7 +625,7 @@ func (s SharePointScope) IncludesCategory(cat sharePointCategory) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s SharePointScope) IsAny(cat sharePointCategory) bool {
return isAnyTarget(s, cat)
return IsAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope

@ -24,7 +24,7 @@ import (
type Client struct {
Credentials account.M365Config

// The Stable service is re-usable for any non-paged request.
// The Stable service is re-usable for any request.
// This allows us to maintain performance across async requests.
Stable graph.Servicer

@ -126,3 +126,15 @@ func (c Client) Get(
type CallConfig struct {
Expand []string
}
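
CallConfig is deliberately minimal for now; a zero value means no options, which is what most call sites in this commit pass. A sketch (the expansion property name is illustrative, not taken from this diff):

// no options:
g, err := ac.Groups().GetByID(ctx, groupID, api.CallConfig{})

// requesting an expansion:
g, err = ac.Groups().GetByID(ctx, groupID, api.CallConfig{
	Expand: []string{"members"}, // property name assumed
})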

// ---------------------------------------------------------------------------
// common interfaces
// ---------------------------------------------------------------------------

type GetByIDer[T any] interface {
GetByID(
ctx context.Context,
identifier string,
cc CallConfig,
) (T, error)
}
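
Any client with a conforming GetByID satisfies the interface, which is what lets the groups code and its mocks swap freely below. A generic helper sketch under that assumption; the helper itself is illustrative, not part of this commit:

// getByIDNoOpts works for any T served by a GetByIDer, passing an empty CallConfig.
func getByIDNoOpts[T any](ctx context.Context, g GetByIDer[T], id string) (T, error) {
	return g.GetByID(ctx, id, CallConfig{})
}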

@ -101,7 +101,7 @@ func idAnd(ss ...string) []string {
// exported
// ---------------------------------------------------------------------------

func DriveItemSelectDefault() []string {
func DefaultDriveItemProps() []string {
return idAnd(
"content.downloadUrl",
"createdBy",

@ -1,11 +0,0 @@
package api

// DeltaUpdate holds the results of a current delta token. It normally
// gets produced when aggregating the addition and removal of items in
// a delta-queryable folder.
type DeltaUpdate struct {
// the deltaLink itself
URL string
// true if the old delta was marked as invalid
Reset bool
}
@ -84,6 +84,26 @@ func (c Drives) GetRootFolder(
return root, nil
}

// TODO: pagination controller needed for completion.
func (c Drives) GetFolderChildren(
ctx context.Context,
driveID, folderID string,
) ([]models.DriveItemable, error) {
response, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(folderID).
Children().
Get(ctx, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "getting folder children")
}

return response.GetValue(), nil
}

// ---------------------------------------------------------------------------
// Items
// ---------------------------------------------------------------------------
@ -331,6 +351,10 @@ func (c Drives) PostItemLinkShareUpdate(
return itm, nil
}

// ---------------------------------------------------------------------------
// helper funcs
// ---------------------------------------------------------------------------

// DriveItemCollisionKey constructs a key from the item name.
// Collision keys are used to identify duplicate item conflicts for handling advanced restoration config.
func DriveItemCollisionKey(item models.DriveItemable) string {
@ -340,3 +364,17 @@ func DriveItemCollisionKey(item models.DriveItemable) string {

return ptr.Val(item.GetName())
}

// NewDriveItem initializes a `models.DriveItemable` with either a folder or file entry.
func NewDriveItem(name string, folder bool) *models.DriveItem {
itemToCreate := models.NewDriveItem()
itemToCreate.SetName(&name)

if folder {
itemToCreate.SetFolder(models.NewFolder())
} else {
itemToCreate.SetFile(models.NewFile())
}

return itemToCreate
}
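
With NewDriveItem exported, the drive API tests drop their local newItem helper (removed further down). A usage sketch mirroring the PostItemInContainer call sites; the names are illustrative:

item := api.NewDriveItem("notes.txt", false) // false: file entry; true would create a folder

created, err := ac.Drives().PostItemInContainer(
	ctx,
	driveID,
	parentFolderID,
	item,
	control.Copy) // collision behavior comes from the control option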

@ -15,6 +15,11 @@ import (
"github.com/alcionai/corso/src/pkg/logger"
)

type DriveItemIDType struct {
ItemID string
IsFolder bool
}

// ---------------------------------------------------------------------------
// non-delta item pager
// ---------------------------------------------------------------------------
@ -65,11 +70,6 @@ func (p *driveItemPageCtrl) ValidModTimes() bool {
return true
}

type DriveItemIDType struct {
ItemID string
IsFolder bool
}

func (c Drives) GetItemsInContainerByCollisionKey(
ctx context.Context,
driveID, containerID string,
@ -131,9 +131,9 @@ type DriveItemDeltaPageCtrl struct {
options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration
}

func (c Drives) NewDriveItemDeltaPager(
driveID, link string,
selectFields []string,
func (c Drives) newDriveItemDeltaPager(
driveID, prevDeltaLink string,
selectProps ...string,
) *DriveItemDeltaPageCtrl {
preferHeaderItems := []string{
"deltashowremovedasdeleted",
@ -142,28 +142,32 @@ func (c Drives) NewDriveItemDeltaPager(
"hierarchicalsharing",
}

requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{
Headers: newPreferHeaders(preferHeaderItems...),
QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{
Select: selectFields,
},
options := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{
Headers: newPreferHeaders(preferHeaderItems...),
QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{},
}

if len(selectProps) > 0 {
options.QueryParameters.Select = selectProps
}

builder := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(onedrive.RootID).
Delta()

if len(prevDeltaLink) > 0 {
builder = drives.NewItemItemsItemDeltaRequestBuilder(prevDeltaLink, c.Stable.Adapter())
}

res := &DriveItemDeltaPageCtrl{
gs: c.Stable,
driveID: driveID,
options: requestConfig,
builder: c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(onedrive.RootID).
Delta(),
}

if len(link) > 0 {
res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, c.Stable.Adapter())
options: options,
builder: builder,
}

return res
@ -193,6 +197,27 @@ func (p *DriveItemDeltaPageCtrl) ValidModTimes() bool {
return true
}

// EnumerateDriveItemsDelta enumerates all items in the specified drive
// via a delta query, returning the items along with the updated delta link.
func (c Drives) EnumerateDriveItemsDelta(
ctx context.Context,
driveID string,
prevDeltaLink string,
) (
[]models.DriveItemable,
DeltaUpdate,
error,
) {
pager := c.newDriveItemDeltaPager(driveID, prevDeltaLink, DefaultDriveItemProps()...)

items, du, err := deltaEnumerateItems[models.DriveItemable](ctx, pager, prevDeltaLink)
if err != nil {
return nil, du, clues.Stack(err)
}

return items, du, nil
}
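
Callers pass an empty prevDeltaLink for a full walk and the stored link to resume; the DeltaUpdate's Reset flag marks an invalidated token, per its comments elsewhere in this commit. A hedged sketch:

// first enumeration: empty prevDeltaLink walks the whole drive.
items, du, err := ac.Drives().EnumerateDriveItemsDelta(ctx, driveID, "")
if err != nil {
	return clues.Stack(err)
}

// later: resume from the persisted link.
moreItems, du2, err := ac.Drives().EnumerateDriveItemsDelta(ctx, driveID, du.URL)
if err != nil {
	return clues.Stack(err)
}

if du2.Reset {
	// the old token was invalidated; treat moreItems as a fresh full
	// enumeration rather than an increment.
}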
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// user's drives pager
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@ -178,3 +178,18 @@ func (suite *DrivePagerIntgSuite) TestDrives_GetItemIDsInContainer() {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *DrivePagerIntgSuite) TestEnumerateDriveItems() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
items, du, err := suite.its.
|
||||
ac.
|
||||
Drives().
|
||||
EnumerateDriveItemsDelta(ctx, suite.its.user.driveID, "")
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotEmpty(t, items, "no items found in user's drive")
|
||||
assert.NotEmpty(t, du.URL, "should have a delta link")
|
||||
}
|
||||
|
||||
@ -17,6 +17,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/control/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
)
|
||||
|
||||
type DriveAPIIntgSuite struct {
|
||||
@ -50,20 +51,6 @@ func (suite *DriveAPIIntgSuite) TestDrives_CreatePagerAndGetPage() {
|
||||
assert.NotNil(t, a)
|
||||
}
|
||||
|
||||
// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
|
||||
func newItem(name string, folder bool) *models.DriveItem {
|
||||
itemToCreate := models.NewDriveItem()
|
||||
itemToCreate.SetName(&name)
|
||||
|
||||
if folder {
|
||||
itemToCreate.SetFolder(models.NewFolder())
|
||||
} else {
|
||||
itemToCreate.SetFile(models.NewFile())
|
||||
}
|
||||
|
||||
return itemToCreate
|
||||
}
|
||||
|
||||
func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
|
||||
t := suite.T()
|
||||
|
||||
@ -78,12 +65,12 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
|
||||
ctx,
|
||||
suite.its.user.driveID,
|
||||
suite.its.user.driveRootFolderID,
|
||||
newItem(rc.Location, true),
|
||||
api.NewDriveItem(rc.Location, true),
|
||||
control.Replace)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// generate a folder to use for collision testing
|
||||
folder := newItem("collision", true)
|
||||
folder := api.NewDriveItem("collision", true)
|
||||
origFolder, err := acd.PostItemInContainer(
|
||||
ctx,
|
||||
suite.its.user.driveID,
|
||||
@ -93,7 +80,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// generate an item to use for collision testing
|
||||
file := newItem("collision.txt", false)
|
||||
file := api.NewDriveItem("collision.txt", false)
|
||||
origFile, err := acd.PostItemInContainer(
|
||||
ctx,
|
||||
suite.its.user.driveID,
|
||||
@ -241,7 +228,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr
|
||||
ctx,
|
||||
suite.its.user.driveID,
|
||||
suite.its.user.driveRootFolderID,
|
||||
newItem(rc.Location, true),
|
||||
api.NewDriveItem(rc.Location, true),
|
||||
// skip instead of replace here to get
|
||||
// an ErrItemAlreadyExistsConflict, just in case.
|
||||
control.Skip)
|
||||
@ -249,7 +236,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr
|
||||
|
||||
// generate items within that folder
|
||||
for i := 0; i < 5; i++ {
|
||||
file := newItem(fmt.Sprintf("collision_%d.txt", i), false)
|
||||
file := api.NewDriveItem(fmt.Sprintf("collision_%d.txt", i), false)
|
||||
f, err := acd.PostItemInContainer(
|
||||
ctx,
|
||||
suite.its.user.driveID,
|
||||
@ -265,7 +252,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr
|
||||
ctx,
|
||||
suite.its.user.driveID,
|
||||
ptr.Val(folder.GetParentReference().GetId()),
|
||||
newItem(rc.Location, true),
|
||||
api.NewDriveItem(rc.Location, true),
|
||||
control.Replace)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotEmpty(t, ptr.Val(resultFolder.GetId()))
|
||||
|
||||
@ -102,6 +102,7 @@ const filterGroupByDisplayNameQueryTmpl = "displayName eq '%s'"
|
||||
func (c Groups) GetByID(
|
||||
ctx context.Context,
|
||||
identifier string,
|
||||
_ CallConfig, // matching standards
|
||||
) (models.Groupable, error) {
|
||||
service, err := c.Service()
|
||||
if err != nil {
|
||||
@ -234,9 +235,9 @@ func IsTeam(ctx context.Context, mg models.Groupable) bool {
|
||||
func (c Groups) GetIDAndName(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
_ CallConfig, // not currently supported
|
||||
cc CallConfig,
|
||||
) (string, string, error) {
|
||||
s, err := c.GetByID(ctx, groupID)
|
||||
s, err := c.GetByID(ctx, groupID, cc)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
@ -121,7 +121,7 @@ func (suite *GroupsIntgSuite) TestGroups_GetByID() {
|
||||
groupsAPI = suite.its.ac.Groups()
|
||||
)
|
||||
|
||||
grp, err := groupsAPI.GetByID(ctx, groupID)
|
||||
grp, err := groupsAPI.GetByID(ctx, groupID, api.CallConfig{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
table := []struct {
|
||||
@ -157,7 +157,7 @@ func (suite *GroupsIntgSuite) TestGroups_GetByID() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
_, err := groupsAPI.GetByID(ctx, test.id)
|
||||
_, err := groupsAPI.GetByID(ctx, test.id, api.CallConfig{})
|
||||
test.expectErr(t, err, clues.ToCore(err))
|
||||
})
|
||||
}
|
||||
|
||||
@ -13,6 +13,20 @@ import (
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// common structs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// DeltaUpdate holds the results of a current delta token. It normally
|
||||
// gets produced when aggregating the addition and removal of items in
|
||||
// a delta-queryable folder.
|
||||
type DeltaUpdate struct {
|
||||
// the deltaLink itself
|
||||
URL string
|
||||
// true if the old delta was marked as invalid
|
||||
Reset bool
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// common interfaces
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@ -223,6 +223,26 @@ func (c Mail) PatchFolder(
    return nil
}

// TODO: needs pager implementation for completion
func (c Mail) GetContainerChildren(
    ctx context.Context,
    userID, containerID string,
) ([]models.MailFolderable, error) {
    resp, err := c.Stable.
        Client().
        Users().
        ByUserId(userID).
        MailFolders().
        ByMailFolderId(containerID).
        ChildFolders().
        Get(ctx, nil)
    if err != nil {
        return nil, graph.Wrap(ctx, err, "getting container child folders")
    }

    return resp.GetValue(), nil
}

// ---------------------------------------------------------------------------
// items
// ---------------------------------------------------------------------------
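Note: per the TODO, GetContainerChildren currently returns only the first page of child folders. A hedged sketch of the paging loop a pager implementation would add; getPage is a hypothetical helper that issues one request, following the raw @odata.nextLink when present:

func getAllContainerChildren(
    ctx context.Context,
    getPage func(ctx context.Context, link *string) (models.MailFolderCollectionResponseable, error),
) ([]models.MailFolderable, error) {
    var (
        folders []models.MailFolderable
        link    *string // nil requests the first page
    )

    for {
        resp, err := getPage(ctx, link)
        if err != nil {
            return nil, err
        }

        folders = append(folders, resp.GetValue()...)

        // a nil next link marks the final page
        link = resp.GetOdataNextLink()
        if link == nil {
            return folders, nil
        }
    }
}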
@ -32,10 +32,11 @@ func (dnl *DeltaNextLinkValues[T]) GetOdataDeltaLink() *string {
}

type PagerResult[T any] struct {
    Values    []T
    NextLink  *string
    DeltaLink *string
    Err       error
    Values     []T
    NextLink   *string
    DeltaLink  *string
    ResetDelta bool
    Err        error
}

// ---------------------------------------------------------------------------
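Note: ResetDelta lets a pager signal mid-enumeration that the cached delta token was rejected by the service. A hedged consumer sketch, assuming a channel-fed pager and the DeltaUpdate type from the hunk above:

func collect[T any](pages <-chan PagerResult[T]) ([]T, DeltaUpdate, error) {
    var (
        items []T
        du    DeltaUpdate
    )

    for page := range pages {
        if page.Err != nil {
            return nil, du, page.Err
        }

        if page.ResetDelta {
            // results gathered under the old token are stale; restart
            items = nil
            du.Reset = true
        }

        items = append(items, page.Values...)

        if page.DeltaLink != nil {
            du.URL = *page.DeltaLink
        }
    }

    return items, du, nil
}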
@ -28,6 +28,27 @@ type Group struct {
    IsTeam bool
}

// GroupByID retrieves a specific group.
func GroupByID(
    ctx context.Context,
    acct account.Account,
    id string,
) (*Group, error) {
    ac, err := makeAC(ctx, acct, path.GroupsService)
    if err != nil {
        return nil, clues.Stack(err).WithClues(ctx)
    }

    cc := api.CallConfig{}

    g, err := ac.Groups().GetByID(ctx, id, cc)
    if err != nil {
        return nil, clues.Stack(err)
    }

    return parseGroup(ctx, g)
}

// GroupsCompat returns a list of groups in the specified M365 tenant.
func GroupsCompat(ctx context.Context, acct account.Account) ([]*Group, error) {
    errs := fault.New(true)
@ -41,6 +41,24 @@ func (suite *GroupsIntgSuite) SetupSuite() {
    suite.acct = tconfig.NewM365Account(t)
}

func (suite *GroupsIntgSuite) TestGroupByID() {
    t := suite.T()

    ctx, flush := tester.NewContext(t)
    defer flush()

    graph.InitializeConcurrencyLimiter(ctx, true, 4)

    gid := tconfig.M365TeamID(t)

    group, err := m365.GroupByID(ctx, suite.acct, gid)
    require.NoError(t, err, clues.ToCore(err))
    require.NotNil(t, group)

    assert.Equal(t, gid, group.ID, "must match expected id")
    assert.NotEmpty(t, group.DisplayName)
}

func (suite *GroupsIntgSuite) TestGroups() {
    t := suite.T()

@ -7,16 +7,17 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"

    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/credentials"
    "github.com/alcionai/corso/src/pkg/storage"
)

type CommonCfgSuite struct {
    suite.Suite
type CommonCfgUnitSuite struct {
    tester.Suite
}

func TestCommonCfgSuite(t *testing.T) {
    suite.Run(t, new(CommonCfgSuite))
func TestCommonCfgUnitSuite(t *testing.T) {
    suite.Run(t, &CommonCfgUnitSuite{Suite: tester.NewUnitSuite(t)})
}

var goodCommonConfig = storage.CommonConfig{
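Note: this file (and the s3 test file further down) migrates from a bare testify suite.Suite to the repo's tester.Suite wrapper, so unit and integration suites share one declaration style. A rough sketch of the plausible shape of that wrapper; the real definition lives in internal/tester and likely differs in detail:

package tester

import (
    "testing"

    "github.com/stretchr/testify/suite"
)

// Suite is the common interface embedded by test suites.
type Suite interface {
    suite.TestingSuite
}

type unitSuite struct {
    suite.Suite
}

// NewUnitSuite has no external requirements; sibling constructors for
// integration suites can skip the test when required environment
// configuration is missing.
func NewUnitSuite(_ *testing.T) Suite {
    return new(unitSuite)
}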
@ -25,7 +26,7 @@ var goodCommonConfig = storage.CommonConfig{
    },
}

func (suite *CommonCfgSuite) TestCommonConfig_Config() {
func (suite *CommonCfgUnitSuite) TestCommonConfig_Config() {
    cfg := goodCommonConfig
    c, err := cfg.StringConfig()
    assert.NoError(suite.T(), err, clues.ToCore(err))
@ -43,7 +44,7 @@ func (suite *CommonCfgSuite) TestCommonConfig_Config() {
    }
}

func (suite *CommonCfgSuite) TestStorage_CommonConfig() {
func (suite *CommonCfgUnitSuite) TestStorage_CommonConfig() {
    t := suite.T()

    in := goodCommonConfig
@ -55,7 +56,7 @@ func (suite *CommonCfgSuite) TestStorage_CommonConfig() {
    assert.Equal(t, in.CorsoPassphrase, out.CorsoPassphrase)
}

func (suite *CommonCfgSuite) TestStorage_CommonConfig_InvalidCases() {
func (suite *CommonCfgUnitSuite) TestStorage_CommonConfig_InvalidCases() {
    // missing required properties
    table := []struct {
        name string
@ -20,6 +20,10 @@ type FilesystemConfig struct {
    Path string
}

func (s Storage) ToFilesystemConfig() (*FilesystemConfig, error) {
    return buildFilesystemConfigFromMap(s.Config)
}

func buildFilesystemConfigFromMap(config map[string]string) (*FilesystemConfig, error) {
    c := &FilesystemConfig{}

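Note: ToFilesystemConfig (with a matching ToS3Config below) gives each provider a typed accessor, replacing the generic-getter-plus-assertion pattern that the s3 tests drop further down. A hedged before/after sketch, with st standing in for any configured Storage value:

// before: fetch the generic interface, then assert the concrete type
func loadFSConfigOld(st Storage) (*FilesystemConfig, error) {
    sc, err := st.StorageConfig()
    if err != nil {
        return nil, err
    }

    return sc.(*FilesystemConfig), nil
}

// after: ask for the concrete type directly
func loadFSConfigNew(st Storage) (*FilesystemConfig, error) {
    return st.ToFilesystemConfig()
}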
@ -69,7 +73,7 @@ func (c *FilesystemConfig) ApplyConfigOverrides(
    if matchFromConfig {
        providerType := cast.ToString(g.Get(StorageProviderTypeKey))
        if providerType != ProviderFilesystem.String() {
            return clues.New("unsupported storage provider in config file: " + providerType)
            return clues.New("unsupported storage provider in config file: [" + providerType + "]")
        }

        // This is matching override values from config file.
@ -62,6 +62,28 @@ var s3constToTomlKeyMap = map[string]string{
    StorageProviderTypeKey: StorageProviderTypeKey,
}

func (s Storage) ToS3Config() (*S3Config, error) {
    return buildS3ConfigFromMap(s.Config)
}

func buildS3ConfigFromMap(config map[string]string) (*S3Config, error) {
    c := &S3Config{}

    if len(config) > 0 {
        c.AccessKey = orEmptyString(config[keyS3AccessKey])
        c.SecretKey = orEmptyString(config[keyS3SecretKey])
        c.SessionToken = orEmptyString(config[keyS3SessionToken])

        c.Bucket = orEmptyString(config[keyS3Bucket])
        c.Endpoint = orEmptyString(config[keyS3Endpoint])
        c.Prefix = orEmptyString(config[keyS3Prefix])
        c.DoNotUseTLS = str.ParseBool(config[keyS3DoNotUseTLS])
        c.DoNotVerifyTLS = str.ParseBool(config[keyS3DoNotVerifyTLS])
    }

    return c, c.validate()
}

func (c *S3Config) normalize() S3Config {
    return S3Config{
        Bucket: common.NormalizeBucket(c.Bucket),
@ -91,24 +113,6 @@ func (c *S3Config) StringConfig() (map[string]string, error) {
    return cfg, cn.validate()
}

func buildS3ConfigFromMap(config map[string]string) (*S3Config, error) {
    c := &S3Config{}

    if len(config) > 0 {
        c.AccessKey = orEmptyString(config[keyS3AccessKey])
        c.SecretKey = orEmptyString(config[keyS3SecretKey])
        c.SessionToken = orEmptyString(config[keyS3SessionToken])

        c.Bucket = orEmptyString(config[keyS3Bucket])
        c.Endpoint = orEmptyString(config[keyS3Endpoint])
        c.Prefix = orEmptyString(config[keyS3Prefix])
        c.DoNotUseTLS = str.ParseBool(config[keyS3DoNotUseTLS])
        c.DoNotVerifyTLS = str.ParseBool(config[keyS3DoNotVerifyTLS])
    }

    return c, c.validate()
}

func (c S3Config) validate() error {
    check := map[string]string{
        Bucket: c.Bucket,
@ -169,11 +173,11 @@ func (c *S3Config) ApplyConfigOverrides(
    if matchFromConfig {
        providerType := cast.ToString(kvg.Get(StorageProviderTypeKey))
        if providerType != ProviderS3.String() {
            return clues.New("unsupported storage provider: " + providerType)
            return clues.New("unsupported storage provider: [" + providerType + "]")
        }

        if err := mustMatchConfig(kvg, s3constToTomlKeyMap, s3Overrides(overrides)); err != nil {
            return clues.Wrap(err, "verifying s3 configs in corso config file")
            return clues.Stack(err)
        }
    }
}
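Note: swapping clues.Wrap for clues.Stack here drops the extra message prefix, presumably because mustMatchConfig already returns a descriptive error; Stack should keep the original text and only annotate the trace. A small hedged illustration with a made-up inner error:

package main

import (
    "errors"
    "fmt"

    "github.com/alcionai/clues"
)

func main() {
    // hypothetical stand-in for an error returned by mustMatchConfig
    err := errors.New("bucket does not match corso config file")

    // Wrap prefixes an additional message onto the error text
    fmt.Println(clues.Wrap(err, "verifying s3 configs in corso config file"))

    // Stack keeps the original text, avoiding a near-duplicate prefix
    fmt.Println(clues.Stack(err))
}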
@ -8,15 +8,16 @@ import (
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"

    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/credentials"
)

type S3CfgSuite struct {
    suite.Suite
type S3CfgUnitSuite struct {
    tester.Suite
}

func TestS3CfgSuite(t *testing.T) {
    suite.Run(t, new(S3CfgSuite))
func TestS3CfgUnitSuite(t *testing.T) {
    suite.Run(t, &S3CfgUnitSuite{Suite: tester.NewUnitSuite(t)})
}

var (
@ -41,7 +42,7 @@ var (
    }
)

func (suite *S3CfgSuite) TestS3Config_Config() {
func (suite *S3CfgUnitSuite) TestS3Config_Config() {
    s3 := goodS3Config

    c, err := s3.StringConfig()
@ -60,16 +61,16 @@ func (suite *S3CfgSuite) TestS3Config_Config() {
    }
}

func (suite *S3CfgSuite) TestStorage_S3Config() {
func (suite *S3CfgUnitSuite) TestStorage_S3Config() {
    t := suite.T()

    in := goodS3Config

    s, err := NewStorage(ProviderS3, &in)
    assert.NoError(t, err, clues.ToCore(err))
    sc, err := s.StorageConfig()

    out, err := s.ToS3Config()
    assert.NoError(t, err, clues.ToCore(err))

    out := sc.(*S3Config)
    assert.Equal(t, in.Bucket, out.Bucket)
    assert.Equal(t, in.Endpoint, out.Endpoint)
    assert.Equal(t, in.Prefix, out.Prefix)
@ -84,7 +85,7 @@ func makeTestS3Cfg(bkt, end, pre, access, secret, session string) S3Config {
    }
}

func (suite *S3CfgSuite) TestStorage_S3Config_invalidCases() {
func (suite *S3CfgUnitSuite) TestStorage_S3Config_invalidCases() {
    // missing required properties
    table := []struct {
        name string
@ -118,13 +119,14 @@ func (suite *S3CfgSuite) TestStorage_S3Config_invalidCases() {
            st, err := NewStorage(ProviderUnknown, &goodS3Config)
            assert.NoError(t, err, clues.ToCore(err))
            test.amend(st)
            _, err = st.StorageConfig()
            assert.Error(t, err)

            _, err = st.ToS3Config()
            assert.Error(t, err, clues.ToCore(err))
        })
    }
}

func (suite *S3CfgSuite) TestStorage_S3Config_StringConfig() {
func (suite *S3CfgUnitSuite) TestStorage_S3Config_StringConfig() {
    table := []struct {
        name  string
        input S3Config
@ -178,7 +180,7 @@ func (suite *S3CfgSuite) TestStorage_S3Config_StringConfig() {
    }
}

func (suite *S3CfgSuite) TestStorage_S3Config_Normalize() {
func (suite *S3CfgUnitSuite) TestStorage_S3Config_Normalize() {
    const (
        prefixedBkt = "s3://bkt"
        normalBkt   = "bkt"