Merge branch 'main' into updateKopiaPassword

commit 23cbb7ca33
Author: neha_gupta
Date: 2023-10-04 12:45:19 +05:30 (committed via GitHub)
137 changed files with 4888 additions and 3355 deletions

View File

@@ -45,6 +45,9 @@ runs:
shell: bash
working-directory: src
run: |
echo "---------------------------"
echo Backup ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-${{ inputs.service }}-${{ inputs.kind }}.log
./corso backup create '${{ inputs.service }}' \
@@ -61,6 +64,9 @@ runs:
shell: bash
working-directory: src
run: |
echo "---------------------------"
echo Restore ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{ inputs.kind }}.log
./corso restore '${{ inputs.service }}' \
@@ -85,11 +91,14 @@ runs:
SANITY_TEST_KIND: restore
SANITY_TEST_FOLDER: ${{ steps.restore.outputs.result }}
SANITY_TEST_SERVICE: ${{ inputs.service }}
TEST_DATA: ${{ inputs.test-folder }}
BASE_BACKUP: ${{ inputs.base-backup }}
SANITY_TEST_DATA: ${{ inputs.test-folder }}
SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
run: |
echo "---------------------------"
echo Sanity Test Restore ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{ inputs.kind }}.log
./sanity-test
./sanity-test restore ${{ inputs.service }}
- name: Export ${{ inputs.service }} ${{ inputs.kind }}
if: inputs.with-export == 'true'
@@ -97,6 +106,9 @@ runs:
shell: bash
working-directory: src
run: |
echo "---------------------------"
echo Export ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{ inputs.kind }}.log
./corso export '${{ inputs.service }}' \
@@ -116,11 +128,14 @@ runs:
SANITY_TEST_KIND: export
SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{ inputs.kind }}
SANITY_TEST_SERVICE: ${{ inputs.service }}
TEST_DATA: ${{ inputs.test-folder }}
BASE_BACKUP: ${{ inputs.base-backup }}
SANITY_TEST_DATA: ${{ inputs.test-folder }}
SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
run: |
echo "---------------------------"
echo Sanity-Test Export ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{ inputs.kind }}.log
./sanity-test
./sanity-test export ${{ inputs.service }}
- name: Export archive ${{ inputs.service }} ${{ inputs.kind }}
if: inputs.with-export == 'true'
@@ -128,6 +143,9 @@ runs:
shell: bash
working-directory: src
run: |
echo "---------------------------"
echo Export Archive ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{ inputs.kind }}.log
./corso export '${{ inputs.service }}' \
@@ -150,16 +168,22 @@ runs:
SANITY_TEST_KIND: export
SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{ inputs.kind }}-unzipped
SANITY_TEST_SERVICE: ${{ inputs.service }}
TEST_DATA: ${{ inputs.test-folder }}
BASE_BACKUP: ${{ inputs.base-backup }}
SANITY_TEST_DATA: ${{ inputs.test-folder }}
SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
run: |
echo "---------------------------"
echo Sanity-Test Export Archive ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{ inputs.kind }}.log
./sanity-test
./sanity-test export ${{ inputs.service }}
- name: List ${{ inputs.service }} ${{ inputs.kind }}
shell: bash
working-directory: src
run: |
echo "---------------------------"
echo Backup list ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-${{ inputs.service }}-${{ inputs.kind }}.log
./corso backup list ${{ inputs.service }} \
@@ -178,6 +202,9 @@ runs:
shell: bash
working-directory: src
run: |
echo "---------------------------"
echo Backup List w/ Backup ${{ inputs.service }} ${{ inputs.kind }}
echo "---------------------------"
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-single-${{ inputs.service }}-${{ inputs.kind }}.log
./corso backup list ${{ inputs.service }} \
@@ -193,7 +220,13 @@ runs:
exit 1
fi
# Upload the original go test output as an artifact for later review.
- if: always()
shell: bash
run: |
echo "---------------------------"
echo Logging Results
echo "---------------------------"
- name: Upload test log
if: always()
uses: actions/upload-artifact@v3
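Taken together, the steps above imply that the sanity-test binary now takes a kind subcommand (restore or export) plus a service argument, with its remaining inputs supplied through SANITY_*-prefixed environment variables. A minimal local sketch, with placeholder values; only the env names and the subcommand shape come from this diff:

# Placeholders throughout; env names mirror the workflow steps above.
export SANITY_TEST_KIND=restore
export SANITY_TEST_SERVICE=exchange
export SANITY_TEST_FOLDER=<restored-folder>
export SANITY_TEST_DATA=<test-folder>
export SANITY_BASE_BACKUP=<base-backup-id>
CORSO_LOG_FILE=<log-dir>/gotest-validate-exchange-<kind>.log ./sanity-test restore exchange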

View File

@@ -31,7 +31,7 @@ runs:
- name: use url or blank val
shell: bash
run: |
echo "STEP=${{ github.action || '' }}" >> $GITHUB_ENV
echo "STEP=${{ env.trimmed_ref || '' }}" >> $GITHUB_ENV
echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV
echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV
echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV
@@ -51,7 +51,7 @@ runs:
"type": "section",
"text": {
"type": "mrkdwn",
"text": "${{ inputs.msg }} :: ${{ env.JOB }} - ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
"text": "${{ inputs.msg }}\n${{ env.JOB }} :: ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
}
}
]

View File

@@ -181,7 +181,7 @@ jobs:
uses: ./.github/actions/backup-restore-test
with:
service: exchange
kind: initial
kind: first-backup
backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"'
restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
@@ -249,7 +249,7 @@ jobs:
uses: ./.github/actions/backup-restore-test
with:
service: onedrive
kind: initial
kind: first-backup
backup-args: '--user "${{ env.TEST_USER }}"'
restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
@@ -305,7 +305,7 @@ jobs:
uses: ./.github/actions/backup-restore-test
with:
service: sharepoint
kind: initial
kind: first-backup
backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"'
restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
@@ -362,12 +362,34 @@ jobs:
uses: ./.github/actions/backup-restore-test
with:
service: groups
kind: initial
kind: first-backup
backup-args: '--group "${{ vars.CORSO_M365_TEST_TEAM_ID }}"'
test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
log-dir: ${{ env.CORSO_LOG_DIR }}
# TODO: incrementals
# generate some more entries for the incremental check
# - name: Groups - Create new data (for incremental)
# working-directory: ./src/cmd/factory
# run: |
# go run . sharepoint files \
# --site ${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }} \
# --user ${{ env.TEST_USER }} \
# --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
# --tenant ${{ secrets.TENANT_ID }} \
# --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }} \
# --count 4
# - name: Groups - Incremental backup
# id: groups-incremental
# uses: ./.github/actions/backup-restore-test
# with:
# service: groups
# kind: incremental
# backup-args: '--site "${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }}"'
# restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
# test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
# log-dir: ${{ env.CORSO_LOG_DIR }}
# with-export: true
##########################################################################################################################################

View File

@@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
- Increase Exchange backup performance by lazily fetching data only for items whose content changed.
- Added `--backups` flag to delete multiple backups in the `corso backup delete` command (sketch below).
- Backup now includes all sites that belong to a team, not just the root site.
## Fixed
- Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.
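A hypothetical invocation of the new `--backups` flag noted above; the comma-separated ID list and the placeholder IDs are assumptions, not confirmed by this diff:

# Assumed form; consult `corso backup delete --help` for the exact ID-list syntax.
./corso backup delete exchange --backups <backup-id-1>,<backup-id-2>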

View File

@@ -16,6 +16,8 @@ import (
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
@@ -48,12 +50,12 @@ func AddCommands(cmd *cobra.Command) {
for _, sc := range subCommandFuncs {
subCommand := sc()
flags.AddAllProviderFlags(subCommand)
flags.AddAllStorageFlags(subCommand)
backupC.AddCommand(subCommand)
for _, addBackupTo := range serviceCommands {
addBackupTo(subCommand)
sc := addBackupTo(subCommand)
flags.AddAllProviderFlags(sc)
flags.AddAllStorageFlags(sc)
}
}
}
@@ -163,7 +165,7 @@ func handleDeleteCmd(cmd *cobra.Command, args []string) error {
// standard set of selector behavior that we want used in the cli
var defaultSelectorConfig = selectors.Config{OnlyMatchItemNames: true}
func runBackups(
func genericCreateCommand(
ctx context.Context,
r repository.Repositoryer,
serviceName string,
@@ -332,6 +334,65 @@ func genericListCommand(
return nil
}
func genericDetailsCommand(
cmd *cobra.Command,
backupID string,
sel selectors.Selector,
) (*details.Details, error) {
ctx := cmd.Context()
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
if err != nil {
return nil, clues.Stack(err)
}
defer utils.CloseRepo(ctx, r)
return genericDetailsCore(
ctx,
r,
backupID,
sel,
rdao.Opts)
}
func genericDetailsCore(
ctx context.Context,
bg repository.BackupGetter,
backupID string,
sel selectors.Selector,
opts control.Options,
) (*details.Details, error) {
ctx = clues.Add(ctx, "backup_id", backupID)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
d, _, errs := bg.GetBackupDetails(ctx, backupID)
// TODO: log/track recoverable errors
if errs.Failure() != nil {
if errors.Is(errs.Failure(), data.ErrNotFound) {
return nil, clues.New("no backup exists with the id " + backupID)
}
return nil, clues.Wrap(errs.Failure(), "failed to get backup details in the repository")
}
if opts.SkipReduce {
return d, nil
}
d, err := sel.Reduce(ctx, d, errs)
if err != nil {
return nil, clues.Wrap(err, "filtering backup details to selection")
}
return d, nil
}
// ---------------------------------------------------------------------------
// helper funcs
// ---------------------------------------------------------------------------
func ifShow(flag string) bool {
return strings.ToLower(strings.TrimSpace(flag)) == "show"
}
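Because genericDetailsCore returns the unreduced details whenever opts.SkipReduce is set, the reduce pass can presumably be bypassed from the CLI. A sketch, assuming the flag spelling used by flags.SkipReduceFN in the test files below and a placeholder backup ID:

# Hypothetical invocation; <backup-id> is a placeholder.
./corso backup details exchange --backup <backup-id> --skip-reduce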

View File

@@ -0,0 +1,68 @@
package backup
import (
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/utils/testdata"
"github.com/alcionai/corso/src/internal/tester"
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
)
type BackupUnitSuite struct {
tester.Suite
}
func TestBackupUnitSuite(t *testing.T) {
suite.Run(t, &BackupUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *BackupUnitSuite) TestGenericDetailsCore() {
t := suite.T()
expected := append(
append(
dtd.GetItemsForVersion(
t,
path.ExchangeService,
path.EmailCategory,
0,
-1),
dtd.GetItemsForVersion(
t,
path.ExchangeService,
path.EventsCategory,
0,
-1)...),
dtd.GetItemsForVersion(
t,
path.ExchangeService,
path.ContactsCategory,
0,
-1)...)
ctx, flush := tester.NewContext(t)
defer flush()
bg := testdata.VersionedBackupGetter{
Details: dtd.GetDetailsSetForVersion(t, 0),
}
sel := selectors.NewExchangeBackup([]string{"user-id"})
sel.Include(sel.AllData())
output, err := genericDetailsCore(
ctx,
bg,
"backup-ID",
sel.Selector,
control.DefaultOptions())
assert.NoError(t, err, clues.ToCore(err))
assert.ElementsMatch(t, expected, output.Entries)
}

View File

@@ -1,21 +1,15 @@
package backup
import (
"context"
"github.com/alcionai/clues"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/alcionai/corso/src/cli/flags"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/selectors"
)
@@ -182,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
selectorSet = append(selectorSet, discSel.Selector)
}
return runBackups(
return genericCreateCommand(
ctx,
r,
"Exchange",
@@ -272,74 +266,31 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
return nil
}
return runDetailsExchangeCmd(cmd)
}
func runDetailsExchangeCmd(cmd *cobra.Command) error {
ctx := cmd.Context()
opts := utils.MakeExchangeOpts(cmd)
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService)
sel := utils.IncludeExchangeRestoreDataSelectors(opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterExchangeRestoreInfoSelectors(sel, opts)
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
if err != nil {
return Only(ctx, err)
}
defer utils.CloseRepo(ctx, r)
ds, err := runDetailsExchangeCmd(
ctx,
r,
flags.BackupIDFV,
opts,
rdao.Opts.SkipReduce)
if err != nil {
return Only(ctx, err)
}
if len(ds.Entries) == 0 {
if len(ds.Entries) > 0 {
ds.PrintEntries(ctx)
} else {
Info(ctx, selectors.ErrorNoMatchingItems)
return nil
}
ds.PrintEntries(ctx)
return nil
}
// runDetailsExchangeCmd actually performs the lookup in backup details.
// the fault.Errors return is always non-nil. Callers should check if
// errs.Failure() == nil.
func runDetailsExchangeCmd(
ctx context.Context,
r repository.BackupGetter,
backupID string,
opts utils.ExchangeOpts,
skipReduce bool,
) (*details.Details, error) {
if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil {
return nil, err
}
ctx = clues.Add(ctx, "backup_id", backupID)
d, _, errs := r.GetBackupDetails(ctx, backupID)
// TODO: log/track recoverable errors
if errs.Failure() != nil {
if errors.Is(errs.Failure(), data.ErrNotFound) {
return nil, clues.New("No backup exists with the id " + backupID)
}
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
}
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
if !skipReduce {
sel := utils.IncludeExchangeRestoreDataSelectors(opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterExchangeRestoreInfoSelectors(sel, opts)
d = sel.Reduce(ctx, d, errs)
}
return d, nil
}
// ------------------------------------------------------------------------------------------------
// backup delete
// ------------------------------------------------------------------------------------------------

View File

@@ -55,7 +55,7 @@ func (suite *NoBackupExchangeE2ESuite) SetupSuite() {
defer flush()
suite.its = newIntegrationTesterSetup(t)
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
}
func (suite *NoBackupExchangeE2ESuite) TestExchangeBackupListCmd_noBackups() {
@@ -109,7 +109,7 @@ func (suite *BackupExchangeE2ESuite) SetupSuite() {
defer flush()
suite.its = newIntegrationTesterSetup(t)
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
}
func (suite *BackupExchangeE2ESuite) TestExchangeBackupCmd_email() {
@@ -336,7 +336,7 @@ func (suite *PreparedBackupExchangeE2ESuite) SetupSuite() {
defer flush()
suite.its = newIntegrationTesterSetup(t)
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
suite.backupOps = make(map[path.CategoryType]string)
var (
@@ -579,7 +579,7 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() {
ctx, flush := tester.NewContext(t)
defer flush()
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
m365UserID := tconfig.M365UserID(t)
users := []string{m365UserID}

View File

@@ -1,8 +1,6 @@
package backup
import (
"bytes"
"fmt"
"strconv"
"testing"
@@ -14,11 +12,9 @@ import (
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/version"
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
"github.com/alcionai/corso/src/pkg/control"
)
@@ -92,76 +88,46 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
func (suite *ExchangeUnitSuite) TestBackupCreateFlags() {
t := suite.T()
cmd := &cobra.Command{Use: createCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addExchangeCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: createCommand},
addExchangeCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),
"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
"--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,
// Test arg parsing for a few args
args := []string{
exchangeServiceCommand,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),
"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
"--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,
// bool flags
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
"--" + flags.DisableDeltaFN,
"--" + flags.EnableImmutableIDFN,
"--" + flags.DisableConcurrencyLimiterFN,
}
args = append(args, flagsTD.PreparedProviderFlags()...)
args = append(args, flagsTD.PreparedStorageFlags()...)
cmd.SetArgs(args)
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
// bool flags
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
"--" + flags.DisableDeltaFN,
"--" + flags.EnableImmutableIDFN,
"--" + flags.DisableConcurrencyLimiterFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
opts := utils.MakeExchangeOpts(cmd)
co := utils.Control()
assert.ElementsMatch(t, flagsTD.MailboxInput, opts.Users)
// no assertion for category data input
assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))
assert.Equal(t, flagsTD.DeltaPageSize, strconv.Itoa(int(co.DeltaPageSize)))
// bool flags
assert.Equal(t, control.FailFast, co.FailureHandling)
assert.True(t, co.ToggleFeatures.DisableIncrementals)
assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
assert.True(t, co.ToggleFeatures.DisableDelta)
assert.True(t, co.ToggleFeatures.ExchangeImmutableIDs)
assert.True(t, co.ToggleFeatures.DisableConcurrencyLimiter)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -169,36 +135,25 @@ func (suite *ExchangeUnitSuite) TestBackupCreateFlags() {
func (suite *ExchangeUnitSuite) TestBackupListFlags() {
t := suite.T()
cmd := &cobra.Command{Use: listCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addExchangeCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
exchangeServiceCommand, []string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: listCommand},
addExchangeCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertBackupListFlags(t, cmd)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
@@ -207,41 +162,28 @@ func (suite *ExchangeUnitSuite) TestBackupListFlags() {
func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() {
t := suite.T()
cmd := &cobra.Command{Use: detailsCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addExchangeCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: detailsCommand},
addExchangeCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
co := utils.Control()
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.True(t, co.SkipReduce)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -249,36 +191,24 @@ func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() {
func (suite *ExchangeUnitSuite) TestBackupDeleteFlags() {
t := suite.T()
cmd := &cobra.Command{Use: deleteCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addExchangeCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: deleteCommand},
addExchangeCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -434,51 +364,3 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupCreateSelectors() {
})
}
}
func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectors() {
for v := 0; v <= version.Backup; v++ {
suite.Run(fmt.Sprintf("version%d", v), func() {
for _, test := range utilsTD.ExchangeOptionDetailLookups {
suite.Run(test.Name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
bg := utilsTD.VersionedBackupGetter{
Details: dtd.GetDetailsSetForVersion(t, v),
}
output, err := runDetailsExchangeCmd(
ctx,
bg,
"backup-ID",
test.Opts(t, v),
false)
assert.NoError(t, err, clues.ToCore(err))
assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
})
}
})
}
}
func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
for _, test := range utilsTD.BadExchangeOptionsFormats {
suite.Run(test.Name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
output, err := runDetailsExchangeCmd(
ctx,
test.BackupGetter,
"backup-ID",
test.Opts(t, version.Backup),
false)
assert.Error(t, err, clues.ToCore(err))
assert.Empty(t, output)
})
}
}

View File

@@ -2,7 +2,6 @@ package backup
import (
"context"
"errors"
"fmt"
"github.com/alcionai/clues"
@@ -14,12 +13,9 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/filters"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/services/m365"
)
@@ -174,7 +170,7 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error {
selectorSet = append(selectorSet, discSel.Selector)
}
return runBackups(
return genericCreateCommand(
ctx,
r,
"Group",
@@ -225,74 +221,31 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
return nil
}
return runDetailsGroupsCmd(cmd)
}
func runDetailsGroupsCmd(cmd *cobra.Command) error {
ctx := cmd.Context()
opts := utils.MakeGroupsOpts(cmd)
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService)
sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterGroupsRestoreInfoSelectors(sel, opts)
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
if err != nil {
return Only(ctx, err)
}
defer utils.CloseRepo(ctx, r)
ds, err := runDetailsGroupsCmd(
ctx,
r,
flags.BackupIDFV,
opts,
rdao.Opts.SkipReduce)
if err != nil {
return Only(ctx, err)
}
if len(ds.Entries) == 0 {
if len(ds.Entries) > 0 {
ds.PrintEntries(ctx)
} else {
Info(ctx, selectors.ErrorNoMatchingItems)
return nil
}
ds.PrintEntries(ctx)
return nil
}
// runDetailsGroupsCmd actually performs the lookup in backup details.
// the fault.Errors return is always non-nil. Callers should check if
// errs.Failure() == nil.
func runDetailsGroupsCmd(
ctx context.Context,
r repository.BackupGetter,
backupID string,
opts utils.GroupsOpts,
skipReduce bool,
) (*details.Details, error) {
if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil {
return nil, err
}
ctx = clues.Add(ctx, "backup_id", backupID)
d, _, errs := r.GetBackupDetails(ctx, backupID)
// TODO: log/track recoverable errors
if errs.Failure() != nil {
if errors.Is(errs.Failure(), data.ErrNotFound) {
return nil, clues.New("no backup exists with the id " + backupID)
}
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
}
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
if !skipReduce {
sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterGroupsRestoreInfoSelectors(sel, opts)
d = sel.Reduce(ctx, d, errs)
}
return d, nil
}
// ------------------------------------------------------------------------------------------------
// backup delete
// ------------------------------------------------------------------------------------------------

View File

@@ -56,7 +56,7 @@ func (suite *NoBackupGroupsE2ESuite) SetupSuite() {
defer flush()
suite.its = newIntegrationTesterSetup(t)
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
}
func (suite *NoBackupGroupsE2ESuite) TestGroupsBackupListCmd_noBackups() {
@@ -110,7 +110,7 @@ func (suite *BackupGroupsE2ESuite) SetupSuite() {
defer flush()
suite.its = newIntegrationTesterSetup(t)
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
}
func (suite *BackupGroupsE2ESuite) TestGroupsBackupCmd_channelMessages() {
@@ -287,7 +287,7 @@ func (suite *PreparedBackupGroupsE2ESuite) SetupSuite() {
defer flush()
suite.its = newIntegrationTesterSetup(t)
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
suite.backupOps = make(map[path.CategoryType]string)
var (
@@ -515,7 +515,7 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() {
ctx, flush := tester.NewContext(t)
defer flush()
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
m365GroupID := tconfig.M365GroupID(t)
groups := []string{m365GroupID}

View File

@@ -1,7 +1,6 @@
package backup
import (
"bytes"
"strconv"
"testing"
@@ -13,6 +12,7 @@ import (
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control"
@@ -128,70 +128,38 @@ func (suite *GroupsUnitSuite) TestValidateGroupsBackupCreateFlags() {
func (suite *GroupsUnitSuite) TestBackupCreateFlags() {
t := suite.T()
cmd := &cobra.Command{Use: createCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addGroupsCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: createCommand},
addGroupsCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
// Test arg parsing for a few args
args := []string{
groupsServiceCommand,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),
"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
// bool flags
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
"--" + flags.DisableDeltaFN,
}
args = append(args, flagsTD.PreparedProviderFlags()...)
args = append(args, flagsTD.PreparedStorageFlags()...)
cmd.SetArgs(args)
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),
"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
"--" + flags.DisableDeltaFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
opts := utils.MakeGroupsOpts(cmd)
co := utils.Control()
assert.ElementsMatch(t, flagsTD.GroupsInput, opts.Groups)
// no assertion for category data input
assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))
// bool flags
assert.Equal(t, control.FailFast, co.FailureHandling)
assert.True(t, co.ToggleFeatures.DisableIncrementals)
assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
assert.True(t, co.ToggleFeatures.DisableDelta)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -199,37 +167,25 @@ func (suite *GroupsUnitSuite) TestBackupCreateFlags() {
func (suite *GroupsUnitSuite) TestBackupListFlags() {
t := suite.T()
cmd := &cobra.Command{Use: listCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addGroupsCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: listCommand},
addGroupsCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertBackupListFlags(t, cmd)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
@@ -238,41 +194,28 @@ func (suite *GroupsUnitSuite) TestBackupListFlags() {
func (suite *GroupsUnitSuite) TestBackupDetailsFlags() {
t := suite.T()
cmd := &cobra.Command{Use: detailsCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addGroupsCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: detailsCommand},
addGroupsCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
co := utils.Control()
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.True(t, co.SkipReduce)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -280,48 +223,24 @@ func (suite *GroupsUnitSuite) TestBackupDetailsFlags() {
func (suite *GroupsUnitSuite) TestBackupDeleteFlags() {
t := suite.T()
cmd := &cobra.Command{Use: deleteCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addGroupsCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: deleteCommand},
addGroupsCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
// Test arg parsing for a few args
args := []string{
groupsServiceCommand,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
}
args = append(args, flagsTD.PreparedProviderFlags()...)
args = append(args, flagsTD.PreparedStorageFlags()...)
cmd.SetArgs(args)
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}

View File

@@ -21,7 +21,7 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/services/m365/api"
"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
@@ -133,6 +133,7 @@ type dependencies struct {
func prepM365Test(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
pst path.ServiceType,
) dependencies {
var (
acct = tconfig.NewM365Account(t)
@@ -140,11 +141,9 @@
recorder = strings.Builder{}
)
sc, err := st.StorageConfig()
cfg, err := st.ToS3Config()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
force := map[string]string{
tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
tconfig.TestCfgStorageProvider: storage.ProviderS3.String(),
@@ -162,7 +161,9 @@
repository.NewRepoID)
require.NoError(t, err, clues.ToCore(err))
err = repo.Initialize(ctx, ctrlRepo.Retention{})
err = repo.Initialize(ctx, repository.InitConfig{
Service: pst,
})
require.NoError(t, err, clues.ToCore(err))
return dependencies{

View File

@@ -1,21 +1,15 @@
package backup
import (
"context"
"github.com/alcionai/clues"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/alcionai/corso/src/cli/flags"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/selectors"
)
@@ -162,7 +156,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
selectorSet = append(selectorSet, discSel.Selector)
}
return runBackups(
return genericCreateCommand(
ctx,
r,
"OneDrive",
@@ -229,74 +223,31 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
return nil
}
return runDetailsOneDriveCmd(cmd)
}
func runDetailsOneDriveCmd(cmd *cobra.Command) error {
ctx := cmd.Context()
opts := utils.MakeOneDriveOpts(cmd)
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
if err != nil {
return Only(ctx, err)
}
defer utils.CloseRepo(ctx, r)
ds, err := runDetailsOneDriveCmd(
ctx,
r,
flags.BackupIDFV,
opts,
rdao.Opts.SkipReduce)
if err != nil {
return Only(ctx, err)
}
if len(ds.Entries) == 0 {
if len(ds.Entries) > 0 {
ds.PrintEntries(ctx)
} else {
Info(ctx, selectors.ErrorNoMatchingItems)
return nil
}
ds.PrintEntries(ctx)
return nil
}
// runDetailsOneDriveCmd actually performs the lookup in backup details.
// the fault.Errors return is always non-nil. Callers should check if
// errs.Failure() == nil.
func runDetailsOneDriveCmd(
ctx context.Context,
r repository.BackupGetter,
backupID string,
opts utils.OneDriveOpts,
skipReduce bool,
) (*details.Details, error) {
if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil {
return nil, err
}
ctx = clues.Add(ctx, "backup_id", backupID)
d, _, errs := r.GetBackupDetails(ctx, backupID)
// TODO: log/track recoverable errors
if errs.Failure() != nil {
if errors.Is(errs.Failure(), data.ErrNotFound) {
return nil, clues.New("no backup exists with the id " + backupID)
}
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
}
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
if !skipReduce {
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
d = sel.Reduce(ctx, d, errs)
}
return d, nil
}
// `corso backup delete onedrive [<flag>...]`
func oneDriveDeleteCmd() *cobra.Command {
return &cobra.Command{

View File

@@ -20,6 +20,7 @@ import (
"github.com/alcionai/corso/src/internal/operations"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -48,7 +49,7 @@ func (suite *NoBackupOneDriveE2ESuite) SetupSuite() {
ctx, flush := tester.NewContext(t)
defer flush()
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)
}
func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupListCmd_empty() {
@@ -139,7 +140,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() {
ctx, flush := tester.NewContext(t)
defer flush()
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)
var (
m365UserID = tconfig.M365UserID(t)

View File

@@ -1,8 +1,6 @@
package backup
import (
"bytes"
"fmt"
"testing"
"github.com/alcionai/clues"
@@ -13,11 +11,9 @@ import (
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/version"
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
"github.com/alcionai/corso/src/pkg/control"
)
@@ -92,48 +88,33 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
func (suite *OneDriveUnitSuite) TestBackupCreateFlags() {
t := suite.T()
cmd := &cobra.Command{Use: createCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addOneDriveCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: createCommand},
addOneDriveCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
opts := utils.MakeOneDriveOpts(cmd)
co := utils.Control()
assert.ElementsMatch(t, flagsTD.UsersInput, opts.Users)
// no assertion for category data input
// bool flags
assert.Equal(t, control.FailFast, co.FailureHandling)
assert.True(t, co.ToggleFeatures.DisableIncrementals)
assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -141,37 +122,25 @@ func (suite *OneDriveUnitSuite) TestBackupCreateFlags() {
func (suite *OneDriveUnitSuite) TestBackupListFlags() {
t := suite.T()
cmd := &cobra.Command{Use: listCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addOneDriveCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: listCommand},
addOneDriveCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertBackupListFlags(t, cmd)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
@@ -180,41 +149,28 @@ func (suite *OneDriveUnitSuite) TestBackupListFlags() {
func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() {
t := suite.T()
cmd := &cobra.Command{Use: detailsCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addOneDriveCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: detailsCommand},
addOneDriveCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
co := utils.Control()
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.True(t, co.SkipReduce)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -222,36 +178,24 @@ func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() {
func (suite *OneDriveUnitSuite) TestBackupDeleteFlags() {
t := suite.T()
cmd := &cobra.Command{Use: deleteCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addOneDriveCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: deleteCommand},
addOneDriveCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@@ -279,51 +223,3 @@ func (suite *OneDriveUnitSuite) TestValidateOneDriveBackupCreateFlags() {
})
}
}
func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectors() {
for v := 0; v <= version.Backup; v++ {
suite.Run(fmt.Sprintf("version%d", v), func() {
for _, test := range utilsTD.OneDriveOptionDetailLookups {
suite.Run(test.Name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
bg := utilsTD.VersionedBackupGetter{
Details: dtd.GetDetailsSetForVersion(t, v),
}
output, err := runDetailsOneDriveCmd(
ctx,
bg,
"backup-ID",
test.Opts(t, v),
false)
assert.NoError(t, err, clues.ToCore(err))
assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
})
}
})
}
}
func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
for _, test := range utilsTD.BadOneDriveOptionsFormats {
suite.Run(test.Name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
output, err := runDetailsOneDriveCmd(
ctx,
test.BackupGetter,
"backup-ID",
test.Opts(t, version.Backup),
false)
assert.Error(t, err, clues.ToCore(err))
assert.Empty(t, output)
})
}
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/alcionai/clues"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/exp/slices"
@@ -13,12 +12,9 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/filters"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/services/m365"
)
@@ -179,7 +175,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
selectorSet = append(selectorSet, discSel.Selector)
}
return runBackups(
return genericCreateCommand(
ctx,
r,
"SharePoint",
@@ -303,7 +299,7 @@ func deleteSharePointCmd(cmd *cobra.Command, args []string) error {
// backup details
// ------------------------------------------------------------------------------------------------
// `corso backup details onedrive [<flag>...]`
// `corso backup details sharepoint [<flag>...]`
func sharePointDetailsCmd() *cobra.Command {
return &cobra.Command{
Use: sharePointServiceCommand,
@@ -324,70 +320,27 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
return nil
}
return runDetailsSharePointCmd(cmd)
}
func runDetailsSharePointCmd(cmd *cobra.Command) error {
ctx := cmd.Context()
opts := utils.MakeSharePointOpts(cmd)
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService)
sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
if err != nil {
return Only(ctx, err)
}
defer utils.CloseRepo(ctx, r)
ds, err := runDetailsSharePointCmd(
ctx,
r,
flags.BackupIDFV,
opts,
rdao.Opts.SkipReduce)
if err != nil {
return Only(ctx, err)
}
if len(ds.Entries) == 0 {
if len(ds.Entries) > 0 {
ds.PrintEntries(ctx)
} else {
Info(ctx, selectors.ErrorNoMatchingItems)
return nil
}
ds.PrintEntries(ctx)
return nil
}
// runDetailsSharePointCmd actually performs the lookup in backup details.
// the fault.Errors return is always non-nil. Callers should check if
// errs.Failure() == nil.
func runDetailsSharePointCmd(
ctx context.Context,
r repository.BackupGetter,
backupID string,
opts utils.SharePointOpts,
skipReduce bool,
) (*details.Details, error) {
if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil {
return nil, err
}
ctx = clues.Add(ctx, "backup_id", backupID)
d, _, errs := r.GetBackupDetails(ctx, backupID)
// TODO: log/track recoverable errors
if errs.Failure() != nil {
if errors.Is(errs.Failure(), data.ErrNotFound) {
return nil, clues.New("no backup exists with the id " + backupID)
}
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
}
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
if !skipReduce {
sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
d = sel.Reduce(ctx, d, errs)
}
return d, nil
}

View File

@@ -20,6 +20,7 @@ import (
"github.com/alcionai/corso/src/internal/operations"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/selectors/testdata"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -46,7 +47,7 @@ func (suite *NoBackupSharePointE2ESuite) SetupSuite() {
ctx, flush := tester.NewContext(t)
defer flush()
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.SharePointService)
}
func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() {
@@ -103,7 +104,7 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() {
ctx, flush := tester.NewContext(t)
defer flush()
suite.dpnd = prepM365Test(t, ctx)
suite.dpnd = prepM365Test(t, ctx, path.SharePointService)
var (
m365SiteID = tconfig.M365SiteID(t)

View File

@@ -1,8 +1,6 @@
package backup
import (
"bytes"
"fmt"
"strings"
"testing"
@@ -14,12 +12,10 @@ import (
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/version"
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/selectors"
)
@@ -94,51 +90,36 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
func (suite *SharePointUnitSuite) TestBackupCreateFlags() {
t := suite.T()
cmd := &cobra.Command{Use: createCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addSharePointCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput),
"--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput),
"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput),
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: createCommand},
addSharePointCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput),
"--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput),
"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput),
"--" + flags.FailFastFN,
"--" + flags.DisableIncrementalsFN,
"--" + flags.ForceItemDataDownloadFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
opts := utils.MakeSharePointOpts(cmd)
co := utils.Control()
assert.ElementsMatch(t, []string{strings.Join(flagsTD.SiteIDInput, ",")}, opts.SiteID)
assert.ElementsMatch(t, flagsTD.WebURLInput, opts.WebURL)
// no assertion for category data input
// bool flags
assert.Equal(t, control.FailFast, co.FailureHandling)
assert.True(t, co.ToggleFeatures.DisableIncrementals)
assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@ -146,37 +127,25 @@ func (suite *SharePointUnitSuite) TestBackupCreateFlags() {
func (suite *SharePointUnitSuite) TestBackupListFlags() {
t := suite.T()
cmd := &cobra.Command{Use: listCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addSharePointCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: listCommand},
addSharePointCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedBackupListFlags(),
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertBackupListFlags(t, cmd)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
@ -185,41 +154,28 @@ func (suite *SharePointUnitSuite) TestBackupListFlags() {
func (suite *SharePointUnitSuite) TestBackupDetailsFlags() {
t := suite.T()
cmd := &cobra.Command{Use: detailsCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addSharePointCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: detailsCommand},
addSharePointCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.SkipReduceFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
co := utils.Control()
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.True(t, co.SkipReduce)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@ -227,36 +183,24 @@ func (suite *SharePointUnitSuite) TestBackupDetailsFlags() {
func (suite *SharePointUnitSuite) TestBackupDeleteFlags() {
t := suite.T()
cmd := &cobra.Command{Use: deleteCommand}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addSharePointCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
flagsTD.WithFlags(
cmd,
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
cmd := cliTD.SetUpCmdHasFlags(
t,
&cobra.Command{Use: deleteCommand},
addSharePointCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
flagsTD.WithFlags(
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
}
@ -391,51 +335,3 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() {
})
}
}
func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectors() {
for v := 0; v <= version.Backup; v++ {
suite.Run(fmt.Sprintf("version%d", v), func() {
for _, test := range utilsTD.SharePointOptionDetailLookups {
suite.Run(test.Name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
bg := utilsTD.VersionedBackupGetter{
Details: dtd.GetDetailsSetForVersion(t, v),
}
output, err := runDetailsSharePointCmd(
ctx,
bg,
"backup-ID",
test.Opts(t, v),
false)
assert.NoError(t, err, clues.ToCore(err))
assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
})
}
})
}
}
func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectorsBadFormats() {
for _, test := range utilsTD.BadSharePointOptionsFormats {
suite.Run(test.Name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
output, err := runDetailsSharePointCmd(
ctx,
test.BackupGetter,
"backup-ID",
test.Opts(t, version.Backup),
false)
assert.Error(t, err, clues.ToCore(err))
assert.Empty(t, output)
})
}
}

View File

@ -54,7 +54,7 @@ func configureAccount(
if matchFromConfig {
providerType := vpr.GetString(account.AccountProviderTypeKey)
if providerType != account.ProviderM365.String() {
return acct, clues.New("unsupported account provider: " + providerType)
return acct, clues.New("unsupported account provider: [" + providerType + "]")
}
if err := mustMatchConfig(vpr, m365Overrides(overrides)); err != nil {

View File

@ -279,8 +279,7 @@ func getStorageAndAccountWithViper(
// possibly read the prior config from a .corso file
if readFromFile {
err = vpr.ReadInConfig()
if err != nil {
if err := vpr.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
return config, clues.Wrap(err, "reading corso config file: "+vpr.ConfigFileUsed())
}

View File

@ -356,10 +356,9 @@ func (suite *ConfigSuite) TestReadFromFlags() {
m365Config, _ := repoDetails.Account.M365Config()
sc, err := repoDetails.Storage.StorageConfig()
s3Cfg, err := repoDetails.Storage.ToS3Config()
require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))
s3Cfg := sc.(*storage.S3Config)
commonConfig, _ := repoDetails.Storage.CommonConfig()
pass := commonConfig.Corso.CorsoPassphrase
@ -425,17 +424,21 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount() {
err = writeRepoConfigWithViper(vpr, s3Cfg, m365, repository.Options{}, "repoid")
require.NoError(t, err, "writing repo config", clues.ToCore(err))
require.Equal(
t,
account.ProviderM365.String(),
vpr.GetString(account.AccountProviderTypeKey),
"viper should have m365 as the account provider")
err = vpr.ReadInConfig()
require.NoError(t, err, "reading repo config", clues.ToCore(err))
cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, true, true, nil)
require.NoError(t, err, "getting storage and account from config", clues.ToCore(err))
sc, err := cfg.Storage.StorageConfig()
readS3Cfg, err := cfg.Storage.ToS3Config()
require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))
readS3Cfg := sc.(*storage.S3Config)
assert.Equal(t, readS3Cfg.Bucket, s3Cfg.Bucket)
assert.Equal(t, readS3Cfg.Endpoint, s3Cfg.Endpoint)
assert.Equal(t, readS3Cfg.Prefix, s3Cfg.Prefix)
@ -482,11 +485,9 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount_noFileOnlyOverride
cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, false, true, overrides)
require.NoError(t, err, "getting storage and account from config", clues.ToCore(err))
sc, err := cfg.Storage.StorageConfig()
readS3Cfg, err := cfg.Storage.ToS3Config()
require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))
readS3Cfg := sc.(*storage.S3Config)
assert.Equal(t, readS3Cfg.Bucket, bkt)
assert.Equal(t, cfg.RepoID, "")
assert.Equal(t, readS3Cfg.Endpoint, end)
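The accessor swap repeated throughout these config hunks is easier to read side by side. A minimal sketch, assuming ToS3Config has the signature used above; the error handling is illustrative:

    // before: fetch the provider-agnostic config, then manually
    // assert its concrete type; a mismatch panics at runtime.
    sc, err := cfg.Storage.StorageConfig()
    if err != nil {
        return err
    }
    s3Cfg := sc.(*storage.S3Config)

    // after: the typed accessor performs the provider check itself
    // and returns an error instead of risking a bad type assertion.
    s3Cfg, err := cfg.Storage.ToS3Config()
    if err != nil {
        return err
    }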

View File

@ -27,11 +27,11 @@ var exportCommands = []func(cmd *cobra.Command) *cobra.Command{
// AddCommands attaches all `corso export * *` commands to the parent.
func AddCommands(cmd *cobra.Command) {
subCommand := exportCmd()
flags.AddAllStorageFlags(subCommand)
cmd.AddCommand(subCommand)
for _, addExportTo := range exportCommands {
addExportTo(subCommand)
sc := addExportTo(subCommand)
flags.AddAllStorageFlags(sc)
}
}

View File

@ -1,17 +1,15 @@
package export
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
@ -39,55 +37,41 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
parent := &cobra.Command{Use: exportCommand}
cmd := &cobra.Command{Use: test.use}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addGroupsCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
flagsTD.WithFlags(
cmd,
groupsServiceCommand,
[]string{
flagsTD.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.FormatFN, flagsTD.FormatType,
// bool flags
"--" + flags.ArchiveFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
parent,
addGroupsCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
groupsServiceCommand,
[]string{
flagsTD.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.FormatFN, flagsTD.FormatType,
"--" + flags.ArchiveFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
cliTD.CheckCmdChild(
t,
parent,
3,
test.expectUse,
test.expectShort,
test.expectRunE)
opts := utils.MakeGroupsOpts(cmd)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive)
assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format)
flagsTD.AssertStorageFlags(t, cmd)
})
}

View File

@ -1,17 +1,15 @@
package export
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
@ -39,67 +37,55 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
parent := &cobra.Command{Use: exportCommand}
cmd := &cobra.Command{Use: test.use}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addOneDriveCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
flagsTD.WithFlags(
cmd,
oneDriveServiceCommand,
[]string{
flagsTD.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.FormatFN, flagsTD.FormatType,
// bool flags
"--" + flags.ArchiveFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
parent,
addOneDriveCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
oneDriveServiceCommand,
[]string{
flagsTD.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.FormatFN, flagsTD.FormatType,
// bool flags
"--" + flags.ArchiveFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
cliTD.CheckCmdChild(
t,
parent,
3,
test.expectUse,
test.expectShort,
test.expectRunE)
opts := utils.MakeOneDriveOpts(cmd)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter)
assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.Equal(t, flagsTD.CorsoPassphrase, flags.CorsoPassphraseFV)
flagsTD.AssertStorageFlags(t, cmd)
})
}

View File

@ -1,17 +1,15 @@
package export
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
@ -39,63 +37,50 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
parent := &cobra.Command{Use: exportCommand}
cmd := &cobra.Command{Use: test.use}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addSharePointCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
flagsTD.WithFlags(
cmd,
sharePointServiceCommand,
[]string{
flagsTD.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.LibraryFN, flagsTD.LibraryInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
"--" + flags.FormatFN, flagsTD.FormatType,
// bool flags
"--" + flags.ArchiveFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
parent,
addSharePointCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
sharePointServiceCommand,
[]string{
flagsTD.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.LibraryFN, flagsTD.LibraryInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
"--" + flags.FormatFN, flagsTD.FormatType,
"--" + flags.ArchiveFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
cliTD.CheckCmdChild(
t,
parent,
3,
test.expectUse,
test.expectShort,
test.expectRunE)
opts := utils.MakeSharePointOpts(cmd)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.Equal(t, flagsTD.LibraryInput, opts.Library)
assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@ -103,16 +88,12 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem)
assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder)
assert.ElementsMatch(t, flagsTD.PageInput, opts.Page)
assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder)
assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive)
assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format)
flagsTD.AssertStorageFlags(t, cmd)
})
}

View File

@ -3,9 +3,10 @@ package testdata
import (
"testing"
"github.com/alcionai/corso/src/cli/flags"
"github.com/spf13/cobra"
"gotest.tools/v3/assert"
"github.com/alcionai/corso/src/cli/flags"
)
func PreparedBackupListFlags() []string {

View File

@ -86,7 +86,7 @@ var (
DisableConcurrencyLimiter = true
)
func WithFlags(
func WithFlags2(
cc *cobra.Command,
command string,
flagSets ...[]string,
@ -99,3 +99,18 @@ func WithFlags(
cc.SetArgs(args)
}
func WithFlags(
command string,
flagSets ...[]string,
) func(*cobra.Command) {
return func(cc *cobra.Command) {
args := []string{command}
for _, sl := range flagSets {
args = append(args, sl...)
}
cc.SetArgs(args)
}
}
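With the curried signature, callers no longer pass the command in; they build the closure and let the setup helper apply it, as the test diffs above show. A minimal usage sketch from a caller's perspective (the command name and flag values are stand-ins):

    // build the closure up front; nothing touches the command yet.
    setArgs := flagsTD.WithFlags(
        "backup",
        []string{"--" + flags.RunModeFN, flags.RunModeFlagTest})

    // applying the closure is equivalent to calling cmd.SetArgs with
    // the command name prepended to the flattened flag list.
    setArgs(cmd)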

View File

@ -85,7 +85,7 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
opt := utils.ControlWithConfig(cfg)
// Retention is not supported for filesystem repos.
retention := ctrlRepo.Retention{}
retentionOpts := ctrlRepo.Retention{}
// SendStartCorsoEvent uses distinct ID as tenant ID because repoID is not yet generated
utils.SendStartCorsoEvent(
@ -96,13 +96,11 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
cfg.Account.ID(),
opt)
sc, err := cfg.Storage.StorageConfig()
storageCfg, err := cfg.Storage.ToFilesystemConfig()
if err != nil {
return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))
}
storageCfg := sc.(*storage.FilesystemConfig)
m365, err := cfg.Account.M365Config()
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@ -118,19 +116,27 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
}
if err = r.Initialize(ctx, retention); err != nil {
ric := repository.InitConfig{RetentionOpts: retentionOpts}
if err = r.Initialize(ctx, ric); err != nil {
if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
return nil
}
return Only(ctx, clues.Wrap(err, "Failed to initialize a new filesystem repository"))
return Only(ctx, clues.Stack(ErrInitializingRepo, err))
}
defer utils.CloseRepo(ctx, r)
Infof(ctx, "Initialized a repository at path %s", storageCfg.Path)
if err = config.WriteRepoConfig(ctx, sc, m365, opt.Repo, r.GetID()); err != nil {
err = config.WriteRepoConfig(
ctx,
storageCfg,
m365,
opt.Repo,
r.GetID())
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to write repository configuration"))
}
@ -181,13 +187,11 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
repoID = events.RepoIDNotFound
}
sc, err := cfg.Storage.StorageConfig()
storageCfg, err := cfg.Storage.ToFilesystemConfig()
if err != nil {
return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))
}
storageCfg := sc.(*storage.FilesystemConfig)
m365, err := cfg.Account.M365Config()
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@ -205,15 +209,21 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
}
if err := r.Connect(ctx); err != nil {
return Only(ctx, clues.Wrap(err, "Failed to connect to the filesystem repository"))
if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
return Only(ctx, clues.Stack(ErrConnectingRepo, err))
}
defer utils.CloseRepo(ctx, r)
Infof(ctx, "Connected to repository at path %s", storageCfg.Path)
if err = config.WriteRepoConfig(ctx, sc, m365, opts.Repo, r.GetID()); err != nil {
err = config.WriteRepoConfig(
ctx,
storageCfg,
m365,
opts.Repo,
r.GetID())
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to write repository configuration"))
}
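Read together, the filesystem hunks above converge on the same init/connect shape the S3 command adopts below. A condensed sketch of the resulting call sequence, using only identifiers that appear in these diffs:

    // Initialize now wraps retention options in an InitConfig...
    ric := repository.InitConfig{RetentionOpts: retentionOpts}
    if err := r.Initialize(ctx, ric); err != nil {
        return Only(ctx, clues.Stack(ErrInitializingRepo, err))
    }

    // ...and Connect takes a ConnConfig, which may also carry the
    // service (see ConnConfig{Service: pst} in later hunks).
    if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
        return Only(ctx, clues.Stack(ErrConnectingRepo, err))
    }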

View File

@ -16,7 +16,6 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/storage"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@ -56,9 +55,8 @@ func (suite *FilesystemE2ESuite) TestInitFilesystemCmd() {
st := storeTD.NewFilesystemStorage(t)
sc, err := st.StorageConfig()
cfg, err := st.ToFilesystemConfig()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.FilesystemConfig)
force := map[string]string{
tconfig.TestCfgStorageProvider: storage.ProviderFilesystem.String(),
@ -113,9 +111,8 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() {
defer flush()
st := storeTD.NewFilesystemStorage(t)
sc, err := st.StorageConfig()
cfg, err := st.ToFilesystemConfig()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.FilesystemConfig)
force := map[string]string{
tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
@ -134,13 +131,13 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() {
// init the repo first
r, err := repository.New(
ctx,
account.Account{},
tconfig.NewM365Account(t),
st,
control.DefaultOptions(),
repository.NewRepoID)
require.NoError(t, err, clues.ToCore(err))
err = r.Initialize(ctx, ctrlRepo.Retention{})
err = r.Initialize(ctx, repository.InitConfig{})
require.NoError(t, err, clues.ToCore(err))
// then test it

View File

@ -21,6 +21,11 @@ const (
maintenanceCommand = "maintenance"
)
var (
ErrConnectingRepo = clues.New("connecting repository")
ErrInitializingRepo = clues.New("initializing repository")
)
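Because the init/connect paths stack these sentinels on top of the underlying failure, callers can still match them. A brief sketch, assuming clues.Stack preserves the errors.Is chain:

    if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
        err = clues.Stack(ErrConnectingRepo, err)
        // errors.Is(err, ErrConnectingRepo) == true, and the original
        // cause remains reachable through errors.Unwrap.
    }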
var repoCommands = []func(cmd *cobra.Command) *cobra.Command{
addS3Commands,
addFilesystemCommands,

View File

@ -116,13 +116,11 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
cfg.Account.ID(),
opt)
sc, err := cfg.Storage.StorageConfig()
s3Cfg, err := cfg.Storage.ToS3Config()
if err != nil {
return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))
}
s3Cfg := sc.(*storage.S3Config)
if strings.HasPrefix(s3Cfg.Endpoint, "http://") || strings.HasPrefix(s3Cfg.Endpoint, "https://") {
invalidEndpointErr := "endpoint doesn't support specifying protocol. " +
"pass --disable-tls flag to use http:// instead of default https://"
@ -145,12 +143,14 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
}
if err = r.Initialize(ctx, retentionOpts); err != nil {
ric := repository.InitConfig{RetentionOpts: retentionOpts}
if err = r.Initialize(ctx, ric); err != nil {
if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
return nil
}
return Only(ctx, clues.Wrap(err, "Failed to initialize a new S3 repository"))
return Only(ctx, clues.Stack(ErrInitializingRepo, err))
}
defer utils.CloseRepo(ctx, r)
@ -199,13 +199,11 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
repoID = events.RepoIDNotFound
}
sc, err := cfg.Storage.StorageConfig()
s3Cfg, err := cfg.Storage.ToS3Config()
if err != nil {
return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))
}
s3Cfg := sc.(*storage.S3Config)
m365, err := cfg.Account.M365Config()
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@ -230,8 +228,8 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
}
if err := r.Connect(ctx); err != nil {
return Only(ctx, clues.Wrap(err, "Failed to connect to the S3 repository"))
if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
return Only(ctx, clues.Stack(ErrConnectingRepo, err))
}
defer utils.CloseRepo(ctx, r)

View File

@ -8,15 +8,16 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/cli"
"github.com/alcionai/corso/src/cli/config"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/storage"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@ -64,9 +65,8 @@ func (suite *S3E2ESuite) TestInitS3Cmd() {
st := storeTD.NewPrefixedS3Storage(t)
sc, err := st.StorageConfig()
cfg, err := st.ToS3Config()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
if !test.hasConfigFile {
@ -102,10 +102,9 @@ func (suite *S3E2ESuite) TestInitMultipleTimes() {
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
sc, err := st.StorageConfig()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
cfg, err := st.ToS3Config()
require.NoError(t, err, clues.ToCore(err))
vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
@ -134,11 +133,9 @@ func (suite *S3E2ESuite) TestInitS3Cmd_missingBucket() {
st := storeTD.NewPrefixedS3Storage(t)
sc, err := st.StorageConfig()
cfg, err := st.ToS3Config()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
force := map[string]string{
tconfig.TestCfgBucket: "",
}
@ -189,9 +186,9 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
sc, err := st.StorageConfig()
cfg, err := st.ToS3Config()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
force := map[string]string{
tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
@ -210,13 +207,13 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
// init the repo first
r, err := repository.New(
ctx,
account.Account{},
tconfig.NewM365Account(t),
st,
control.DefaultOptions(),
repository.NewRepoID)
require.NoError(t, err, clues.ToCore(err))
err = r.Initialize(ctx, ctrlRepo.Retention{})
err = r.Initialize(ctx, repository.InitConfig{})
require.NoError(t, err, clues.ToCore(err))
// then test it
@ -234,60 +231,65 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
}
}
func (suite *S3E2ESuite) TestConnectS3Cmd_BadBucket() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
sc, err := st.StorageConfig()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
ctx = config.SetViper(ctx, vpr)
cmd := cliTD.StubRootCmd(
"repo", "connect", "s3",
"--config-file", configFP,
"--bucket", "wrong",
"--prefix", cfg.Prefix)
cli.BuildCommandTree(cmd)
// run the command
err = cmd.ExecuteContext(ctx)
require.Error(t, err, clues.ToCore(err))
}
func (suite *S3E2ESuite) TestConnectS3Cmd_BadPrefix() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
sc, err := st.StorageConfig()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
ctx = config.SetViper(ctx, vpr)
cmd := cliTD.StubRootCmd(
"repo", "connect", "s3",
"--config-file", configFP,
"--bucket", cfg.Bucket,
"--prefix", "wrong")
cli.BuildCommandTree(cmd)
// run the command
err = cmd.ExecuteContext(ctx)
require.Error(t, err, clues.ToCore(err))
}
func (suite *S3E2ESuite) TestConnectS3Cmd_badInputs() {
table := []struct {
name string
bucket string
prefix string
expectErr func(t *testing.T, err error)
}{
{
name: "bucket",
bucket: "wrong",
expectErr: func(t *testing.T, err error) {
assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err))
},
},
{
name: "prefix",
prefix: "wrong",
expectErr: func(t *testing.T, err error) {
assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err))
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
cfg, err := st.ToS3Config()
require.NoError(t, err, clues.ToCore(err))
bucket := str.First(test.bucket, cfg.Bucket)
prefix := str.First(test.prefix, cfg.Prefix)
over := map[string]string{}
acct := tconfig.NewM365Account(t)
maps.Copy(over, acct.Config)
over[account.AccountProviderTypeKey] = account.ProviderM365.String()
over[storage.StorageProviderTypeKey] = storage.ProviderS3.String()
vpr, configFP := tconfig.MakeTempTestConfigClone(t, over)
ctx = config.SetViper(ctx, vpr)
cmd := cliTD.StubRootCmd(
"repo", "connect", "s3",
"--config-file", configFP,
"--bucket", bucket,
"--prefix", prefix)
cli.BuildCommandTree(cmd)
// run the command
err = cmd.ExecuteContext(ctx)
require.Error(t, err, clues.ToCore(err))
test.expectErr(t, err)
})
}
}
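One detail worth noting in the consolidated test: each table case sets only one of bucket or prefix, and str.First (imported above from internal/common/str) is presumably a first-non-empty selector, so the untouched field falls back to the valid config value:

    bucket := str.First("", cfg.Bucket)      // -> cfg.Bucket (no override)
    prefix := str.First("wrong", cfg.Prefix) // -> "wrong" (override applied)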
func (suite *S3E2ESuite) TestUpdateS3Cmd() {

View File

@ -20,7 +20,6 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/selectors"
@ -66,11 +65,9 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
suite.acct = tconfig.NewM365Account(t)
suite.st = storeTD.NewPrefixedS3Storage(t)
sc, err := suite.st.StorageConfig()
cfg, err := suite.st.ToS3Config()
require.NoError(t, err, clues.ToCore(err))
cfg := sc.(*storage.S3Config)
force := map[string]string{
tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
tconfig.TestCfgStorageProvider: storage.ProviderS3.String(),
@ -94,7 +91,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
repository.NewRepoID)
require.NoError(t, err, clues.ToCore(err))
err = suite.repo.Initialize(ctx, ctrlRepo.Retention{})
err = suite.repo.Initialize(ctx, repository.InitConfig{Service: path.ExchangeService})
require.NoError(t, err, clues.ToCore(err))
suite.backupOps = make(map[path.CategoryType]operations.BackupOperation)

View File

@ -1,17 +1,15 @@
package restore
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
@ -39,80 +37,64 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
parent := &cobra.Command{Use: restoreCommand}
cmd := &cobra.Command{Use: test.use}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addExchangeCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
flagsTD.WithFlags(
cmd,
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput),
"--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput),
"--" + flags.ContactNameFN, flagsTD.ContactNameInput,
"--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput),
"--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput),
"--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput,
"--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput,
"--" + flags.EmailSenderFN, flagsTD.EmailSenderInput,
"--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput,
"--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput),
"--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput),
"--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput,
"--" + flags.EventRecursFN, flagsTD.EventRecursInput,
"--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput,
"--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput,
"--" + flags.EventSubjectFN, flagsTD.EventSubjectInput,
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
cmd := cliTD.SetUpCmdHasFlags(
t,
parent,
addExchangeCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
exchangeServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput),
"--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput),
"--" + flags.ContactNameFN, flagsTD.ContactNameInput,
"--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput),
"--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput),
"--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput,
"--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput,
"--" + flags.EmailSenderFN, flagsTD.EmailSenderInput,
"--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput,
"--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput),
"--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput),
"--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput,
"--" + flags.EventRecursFN, flagsTD.EventRecursInput,
"--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput,
"--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput,
"--" + flags.EventSubjectFN, flagsTD.EventSubjectInput,
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
cliTD.CheckCmdChild(
t,
parent,
3,
test.expectUse,
test.expectShort,
test.expectRunE)
opts := utils.MakeExchangeOpts(cmd)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.ElementsMatch(t, flagsTD.ContactInput, opts.Contact)
assert.ElementsMatch(t, flagsTD.ContactFldInput, opts.ContactFolder)
assert.Equal(t, flagsTD.ContactNameInput, opts.ContactName)
assert.ElementsMatch(t, flagsTD.EmailInput, opts.Email)
assert.ElementsMatch(t, flagsTD.EmailFldInput, opts.EmailFolder)
assert.Equal(t, flagsTD.EmailReceivedAfterInput, opts.EmailReceivedAfter)
assert.Equal(t, flagsTD.EmailReceivedBeforeInput, opts.EmailReceivedBefore)
assert.Equal(t, flagsTD.EmailSenderInput, opts.EmailSender)
assert.Equal(t, flagsTD.EmailSubjectInput, opts.EmailSubject)
assert.ElementsMatch(t, flagsTD.EventInput, opts.Event)
assert.ElementsMatch(t, flagsTD.EventCalInput, opts.EventCalendar)
assert.Equal(t, flagsTD.EventOrganizerInput, opts.EventOrganizer)
@ -120,11 +102,9 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
assert.Equal(t, flagsTD.EventStartsAfterInput, opts.EventStartsAfter)
assert.Equal(t, flagsTD.EventStartsBeforeInput, opts.EventStartsBefore)
assert.Equal(t, flagsTD.EventSubjectInput, opts.EventSubject)
assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
})

View File

@ -1,17 +1,15 @@
package restore
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
@ -39,65 +37,51 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
parent := &cobra.Command{Use: restoreCommand}
cmd := &cobra.Command{Use: test.use}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addGroupsCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
flagsTD.WithFlags(
cmd,
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.LibraryFN, flagsTD.LibraryInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
// bool flags
"--" + flags.NoPermissionsFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
parent,
addGroupsCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
groupsServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.LibraryFN, flagsTD.LibraryInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
"--" + flags.NoPermissionsFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
cliTD.CheckCmdChild(
t,
parent,
3,
test.expectUse,
test.expectShort,
test.expectRunE)
opts := utils.MakeGroupsOpts(cmd)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.Equal(t, flagsTD.LibraryInput, opts.Library)
assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@ -105,14 +89,10 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
// bool flags
assert.True(t, flags.NoPermissionsFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
})

View File

@ -1,17 +1,15 @@
package restore
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
@ -39,73 +37,56 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
parent := &cobra.Command{Use: restoreCommand}
cmd := &cobra.Command{Use: test.use}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addOneDriveCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
flagsTD.WithFlags(
cmd,
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
// bool flags
"--" + flags.NoPermissionsFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
parent,
addOneDriveCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
oneDriveServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
"--" + flags.NoPermissionsFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
cliTD.CheckCmdChild(
t,
parent,
3,
test.expectUse,
test.expectShort,
test.expectRunE)
opts := utils.MakeOneDriveOpts(cmd)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter)
assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
// bool flags
assert.True(t, flags.NoPermissionsFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
})

View File

@ -25,12 +25,12 @@ var restoreCommands = []func(cmd *cobra.Command) *cobra.Command{
// AddCommands attaches all `corso restore * *` commands to the parent.
func AddCommands(cmd *cobra.Command) {
subCommand := restoreCmd()
flags.AddAllProviderFlags(subCommand)
flags.AddAllStorageFlags(subCommand)
cmd.AddCommand(subCommand)
for _, addRestoreTo := range restoreCommands {
addRestoreTo(subCommand)
sc := addRestoreTo(subCommand)
flags.AddAllProviderFlags(sc)
flags.AddAllStorageFlags(sc)
}
}

View File

@ -1,17 +1,15 @@
package restore
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
cliTD "github.com/alcionai/corso/src/cli/testdata"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
@ -39,64 +37,51 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
parent := &cobra.Command{Use: restoreCommand}
cmd := &cobra.Command{Use: test.use}
// persistent flags not added by addCommands
flags.AddRunModeFlag(cmd, true)
c := addSharePointCommands(cmd)
require.NotNil(t, c)
// non-persistent flags not added by addCommands
flags.AddAllProviderFlags(c)
flags.AddAllStorageFlags(c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
flagsTD.WithFlags(
cmd,
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.LibraryFN, flagsTD.LibraryInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
// bool flags
"--" + flags.NoPermissionsFN,
cmd := cliTD.SetUpCmdHasFlags(
t,
parent,
addSharePointCommands,
[]cliTD.UseCobraCommandFn{
flags.AddAllProviderFlags,
flags.AddAllStorageFlags,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags())
flagsTD.WithFlags(
sharePointServiceCommand,
[]string{
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, flagsTD.BackupInput,
"--" + flags.LibraryFN, flagsTD.LibraryInput,
"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
"--" + flags.NoPermissionsFN,
},
flagsTD.PreparedProviderFlags(),
flagsTD.PreparedStorageFlags()))
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
cliTD.CheckCmdChild(
t,
parent,
3,
test.expectUse,
test.expectShort,
test.expectRunE)
opts := utils.MakeSharePointOpts(cmd)
assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
assert.Equal(t, flagsTD.LibraryInput, opts.Library)
assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@ -104,20 +89,14 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem)
assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder)
assert.ElementsMatch(t, flagsTD.PageInput, opts.Page)
assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder)
assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
// bool flags
assert.True(t, flags.NoPermissionsFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)
})

View File

@ -1,11 +1,20 @@
package testdata
import (
"bytes"
"fmt"
"strings"
"testing"
"time"
"github.com/alcionai/clues"
"github.com/google/uuid"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/internal/tester"
)
// StubRootCmd builds a stub cobra command to be used as
@ -27,3 +36,82 @@ func StubRootCmd(args ...string) *cobra.Command {
return c
}
type UseCobraCommandFn func(*cobra.Command)
func SetUpCmdHasFlags(
t *testing.T,
parentCmd *cobra.Command,
addChildCommand func(*cobra.Command) *cobra.Command,
addFlags []UseCobraCommandFn,
setArgs UseCobraCommandFn,
) *cobra.Command {
parentCmd.PersistentPreRun = func(c *cobra.Command, args []string) {
t.Log("testing args:")
for _, arg := range args {
t.Log(arg)
}
}
// persistent flags not added by addCommands
flags.AddRunModeFlag(parentCmd, true)
cmd := addChildCommand(parentCmd)
require.NotNil(t, cmd)
cul := cmd.UseLine()
require.Truef(
t,
strings.HasPrefix(cul, parentCmd.Use+" "+cmd.Use),
"child command has expected usage format 'parent child', got %q",
cul)
for _, af := range addFlags {
af(cmd)
}
setArgs(parentCmd)
parentCmd.SetOut(new(bytes.Buffer)) // drop output
parentCmd.SetErr(new(bytes.Buffer)) // drop output
err := parentCmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
return cmd
}
type CobraRunEFn func(cmd *cobra.Command, args []string) error
func CheckCmdChild(
t *testing.T,
cmd *cobra.Command,
expectChildCount int,
expectUse string,
expectShort string,
expectRunE CobraRunEFn,
) {
var (
cmds = cmd.Commands()
child *cobra.Command
)
for _, cc := range cmds {
if cc.Use == expectUse {
child = cc
break
}
}
require.Len(
t,
cmds,
expectChildCount,
"parent command should have the correct child command count")
require.NotNil(t, child, "should have found expected child command")
assert.Equal(t, expectShort, child.Short)
tester.AreSameFunc(t, expectRunE, child.RunE)
}
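A compressed sketch of how the refactored service tests wire these two helpers together; the child-registering function, use string, and expected metadata are stand-ins:

    parent := &cobra.Command{Use: "restore"}

    // registers the child command, applies each flag-adding func to it,
    // sets args via the WithFlags closure, and executes the tree.
    cmd := cliTD.SetUpCmdHasFlags(
        t,
        parent,
        addServiceCommands, // hypothetical addChildCommand func
        []cliTD.UseCobraCommandFn{
            flags.AddAllProviderFlags,
            flags.AddAllStorageFlags,
        },
        flagsTD.WithFlags("service", []string{
            "--" + flags.RunModeFN, flags.RunModeFlagTest,
        }))

    // then verify the parent gained the expected child command.
    cliTD.CheckCmdChild(t, parent, 1, "service", "short description", expectRunE)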

View File

@ -78,16 +78,10 @@ func GetAccountAndConnectWithOverrides(
return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller")
}
if err := r.Connect(ctx); err != nil {
if err := r.Connect(ctx, repository.ConnConfig{Service: pst}); err != nil {
return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository")
}
// this initializes our graph api client configurations,
// including control options such as concurrency limitations.
if _, err := r.ConnectToM365(ctx, pst); err != nil {
return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365")
}
rdao := RepoDetailsAndOpts{
Repo: cfg,
Opts: opts,

View File

@ -72,7 +72,7 @@ func deleteBackups(
// Only supported for S3 repos currently.
func pitrListBackups(
ctx context.Context,
service path.ServiceType,
pst path.ServiceType,
pitr time.Time,
backupIDs []string,
) error {
@ -113,14 +113,14 @@ func pitrListBackups(
return clues.Wrap(err, "creating a repo")
}
err = r.Connect(ctx)
err = r.Connect(ctx, repository.ConnConfig{Service: pst})
if err != nil {
return clues.Wrap(err, "connecting to the repository")
}
defer r.Close(ctx)
backups, err := r.BackupsByTag(ctx, store.Service(service))
backups, err := r.BackupsByTag(ctx, store.Service(pst))
if err != nil {
return clues.Wrap(err, "listing backups").WithClues(ctx)
}

View File

@ -197,13 +197,11 @@ func handleCheckerCommand(cmd *cobra.Command, args []string, f flags) error {
return clues.Wrap(err, "getting storage config")
}
sc, err := repoDetails.Storage.StorageConfig()
cfg, err := repoDetails.Storage.ToS3Config()
if err != nil {
return clues.Wrap(err, "getting S3 config")
}
cfg := sc.(*storage.S3Config)
endpoint := defaultS3Endpoint
if len(cfg.Endpoint) > 0 {
endpoint = cfg.Endpoint

View File

@ -1,6 +1,68 @@
package common
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/credentials"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
type PermissionInfo struct {
EntityID string
Roles []string
}
const (
sanityBaseBackup = "SANITY_BASE_BACKUP"
sanityTestData = "SANITY_TEST_DATA"
sanityTestFolder = "SANITY_TEST_FOLDER"
sanityTestService = "SANITY_TEST_SERVICE"
)
type Envs struct {
BaseBackupFolder string
DataFolder string
FolderName string
Service string
SiteID string
StartTime time.Time
UserID string
}
func EnvVars(ctx context.Context) Envs {
folder := strings.TrimSpace(os.Getenv(sanityTestFolder))
startTime, _ := MustGetTimeFromName(ctx, folder)
e := Envs{
BaseBackupFolder: os.Getenv(sanityBaseBackup),
DataFolder: os.Getenv(sanityTestData),
FolderName: folder,
SiteID: tconfig.GetM365SiteID(ctx),
Service: os.Getenv(sanityTestService),
StartTime: startTime,
UserID: tconfig.GetM365UserID(ctx),
}
fmt.Printf("\n-----\nenvs %+v\n-----\n", e)
logger.Ctx(ctx).Info("envs", e)
return e
}
func GetAC() (api.Client, error) {
creds := account.M365Config{
M365: credentials.GetM365(),
AzureTenantID: os.Getenv(account.AzureTenantID),
}
return api.NewClient(creds, control.DefaultOptions())
}
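
A minimal sketch of the environment these helpers expect (all values are illustrative, including the folder name format; MustGetTimeFromName parses the start time out of the folder name):

// hypothetical test setup; values are illustrative only
t.Setenv("SANITY_TEST_FOLDER", "Corso_Restore_02-Oct-2023_15-04-05")
t.Setenv("SANITY_TEST_DATA", "testdata")
t.Setenv("SANITY_BASE_BACKUP", "base_backup")
t.Setenv("SANITY_TEST_SERVICE", "exchange")

envs := common.EnvVars(ctx)

ac, err := common.GetAC()
if err != nil {
    common.Fatal(ctx, "building api client", err)
}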

View File

@ -0,0 +1,38 @@
package common
import (
"os"
"path/filepath"
"time"
"github.com/alcionai/clues"
)
// FilepathWalker returns a filepath.WalkFunc that records the size of every
// file under folderName and tracks the earliest mod time it sees. startTime
// is a pointer so the update survives the closure; a plain time.Time
// parameter would only mutate the walker's local copy.
func FilepathWalker(
folderName string,
exportFileSizes map[string]int64,
startTime *time.Time,
) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return clues.Stack(err)
}
if info.IsDir() {
return nil
}
relPath, err := filepath.Rel(folderName, path)
if err != nil {
return clues.Stack(err)
}
exportFileSizes[relPath] = info.Size()
if startTime.After(info.ModTime()) {
*startTime = info.ModTime()
}
return nil
}
}
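
Callers hand the result to filepath.Walk, passing startTime by address so the earliest mod time survives the walk (a sketch; exportDir is a stand-in):

var (
    exportDir = "/tmp/export-onedrive"
    sizes     = map[string]int64{}
    startTime = time.Now()
)

if err := filepath.Walk(exportDir, FilepathWalker(exportDir, sizes, &startTime)); err != nil {
    fmt.Println("Error walking the path:", err)
}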

View File

@ -0,0 +1,69 @@
package common
import (
"context"
"golang.org/x/exp/maps"
)
// Sanitree builds out a hierarchical tree of items for comparison,
// primarily so that a restore can easily compare two subtrees
// against each other.
type Sanitree[T any] struct {
Container T
ContainerID string
ContainerName string
// count of non-container (leaf) items only
ContainsItems int
// name -> node
Children map[string]*Sanitree[T]
}
func AssertEqualTrees[T any](
ctx context.Context,
expect, other *Sanitree[T],
) {
if expect == nil && other == nil {
return
}
Assert(
ctx,
func() bool { return expect != nil && other != nil },
"non nil nodes",
expect,
other)
Assert(
ctx,
func() bool { return expect.ContainerName == other.ContainerName },
"container names match",
expect.ContainerName,
other.ContainerName)
Assert(
ctx,
func() bool { return expect.ContainsItems == other.ContainsItems },
"count of items in container matches",
expect.ContainsItems,
other.ContainsItems)
Assert(
ctx,
func() bool { return len(expect.Children) == len(other.Children) },
"count of child containers matches",
len(expect.Children),
len(other.Children))
for name, s := range expect.Children {
ch, ok := other.Children[name]
Assert(
ctx,
func() bool { return ok },
"found matching child container",
name,
maps.Keys(other.Children))
AssertEqualTrees(ctx, s, ch)
}
}
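
To make the comparison concrete, a tiny illustrative pair of trees (string containers and hypothetical data):

inbox := &Sanitree[string]{
    ContainerName: "inbox",
    ContainsItems: 5,
    Children:      map[string]*Sanitree[string]{},
}

expect := &Sanitree[string]{
    ContainerName: "root",
    Children:      map[string]*Sanitree[string]{"inbox": inbox},
}

other := expect // stand-in; normally built from the restored data

// Assert exits the process on the first mismatch, so reaching the
// end means the trees match.
AssertEqualTrees(ctx, expect, other)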

View File

@ -22,7 +22,7 @@ func Assert(
return
}
header = "Error: " + header
header = "TEST FAILURE: " + header
expected := fmt.Sprintf("* Expected: %+v", expect)
got := fmt.Sprintf("* Current: %+v", current)
@ -37,7 +37,7 @@ func Assert(
func Fatal(ctx context.Context, msg string, err error) {
logger.CtxErr(ctx, err).Error("test failure: " + msg)
fmt.Println(msg+": ", err)
fmt.Println("TEST FAILURE: "+msg+": ", err)
os.Exit(1)
}

View File

@ -0,0 +1,16 @@
package export
import (
"context"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
func CheckGroupsExport(
ctx context.Context,
ac api.Client,
envs common.Envs,
) {
// TODO
}

View File

@ -3,28 +3,21 @@ package export
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
func CheckOneDriveExport(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
userID, folderName, dataFolder string,
ac api.Client,
envs common.Envs,
) {
drive, err := client.
Users().
ByUserId(userID).
Drive().
Get(ctx, nil)
drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
@ -36,37 +29,19 @@ func CheckOneDriveExport(
startTime = time.Now()
)
err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
if err != nil {
return clues.Stack(err)
}
if info.IsDir() {
return nil
}
relPath, err := filepath.Rel(folderName, path)
if err != nil {
return clues.Stack(err)
}
exportFileSizes[relPath] = info.Size()
if startTime.After(info.ModTime()) {
startTime = info.ModTime()
}
return nil
})
err = filepath.Walk(
envs.FolderName,
common.FilepathWalker(envs.FolderName, exportFileSizes, &startTime))
if err != nil {
fmt.Println("Error walking the path:", err)
}
_ = restore.PopulateDriveDetails(
ctx,
client,
ac,
ptr.Val(drive.GetId()),
folderName,
dataFolder,
envs.FolderName,
envs.DataFolder,
fileSizes,
map[string][]common.PermissionInfo{},
startTime)

View File

@ -3,28 +3,21 @@ package export
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
func CheckSharePointExport(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
siteID, folderName, dataFolder string,
ac api.Client,
envs common.Envs,
) {
drive, err := client.
Sites().
BySiteId(siteID).
Drive().
Get(ctx, nil)
drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
@ -36,37 +29,19 @@ func CheckSharePointExport(
startTime = time.Now()
)
err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
if err != nil {
return clues.Stack(err)
}
if info.IsDir() {
return nil
}
relPath, err := filepath.Rel(folderName, path)
if err != nil {
return clues.Stack(err)
}
exportFileSizes[relPath] = info.Size()
if startTime.After(info.ModTime()) {
startTime = info.ModTime()
}
return nil
})
err = filepath.Walk(
envs.FolderName,
common.FilepathWalker(envs.FolderName, exportFileSizes, &startTime))
if err != nil {
fmt.Println("Error walking the path:", err)
}
_ = restore.PopulateDriveDetails(
ctx,
client,
ac,
ptr.Val(drive.GetId()),
folderName,
dataFolder,
envs.FolderName,
envs.DataFolder,
fileSizes,
map[string][]common.PermissionInfo{},
startTime)

View File

@ -3,99 +3,43 @@ package restore
import (
"context"
"fmt"
stdpath "path"
"strings"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/users"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/filters"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
// CheckEmailRestoration verifies that the email counts in the restored folder
// match the email counts in the actual M365 account.
func CheckEmailRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
testUser, folderName, dataFolder, baseBackupFolder string,
startTime time.Time,
ac api.Client,
envs common.Envs,
) {
var (
restoreFolder models.MailFolderable
itemCount = make(map[string]int32)
restoreItemCount = make(map[string]int32)
builder = client.Users().ByUserId(testUser).MailFolders()
folderNameToItemCount = make(map[string]int32)
folderNameToRestoreItemCount = make(map[string]int32)
)
for {
result, err := builder.Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting mail folders", err)
}
restoredTree := buildSanitree(ctx, ac, envs.UserID, envs.FolderName)
dataTree := buildSanitree(ctx, ac, envs.UserID, envs.DataFolder)
values := result.GetValue()
for _, v := range values {
itemName := ptr.Val(v.GetDisplayName())
if itemName == folderName {
restoreFolder = v
continue
}
if itemName == dataFolder || itemName == baseBackupFolder {
// otherwise, recursively aggregate all child folders.
getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount)
itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
}
}
link, ok := ptr.ValOK(result.GetOdataNextLink())
if !ok {
break
}
builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
}
folderID := ptr.Val(restoreFolder.GetId())
folderName = ptr.Val(restoreFolder.GetDisplayName())
ctx = clues.Add(
ctx,
"restore_folder_id", folderID,
"restore_folder_name", folderName)
"restore_folder_id", restoredTree.ContainerID,
"restore_folder_name", restoredTree.ContainerName,
"original_folder_id", dataTree.ContainerID,
"original_folder_name", dataTree.ContainerName)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting restore folder child folders", err)
}
verifyEmailData(ctx, folderNameToRestoreItemCount, folderNameToItemCount)
for _, fld := range childFolder.GetValue() {
restoreDisplayName := ptr.Val(fld.GetDisplayName())
// check if folder is the data folder we loaded or the base backup to verify
// the incremental backup worked fine
if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
count, _ := ptr.ValOK(fld.GetTotalItemCount())
restoreItemCount[restoreDisplayName] = count
checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
}
}
verifyEmailData(ctx, restoreItemCount, itemCount)
common.AssertEqualTrees[models.MailFolderable](
ctx,
dataTree,
restoredTree.Children[envs.DataFolder])
}
func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
@ -111,109 +55,71 @@ func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[
}
}
// getAllSubFolder will recursively check for all subfolders and get the corresponding
// email count.
func getAllMailSubFolders(
func buildSanitree(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
testUser string,
r models.MailFolderable,
parentFolder,
dataFolder string,
messageCount map[string]int32,
) {
var (
folderID = ptr.Val(r.GetId())
count int32 = 99
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
Top: &count,
},
}
)
ctx = clues.Add(ctx, "parent_folder_id", folderID)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, options)
ac api.Client,
userID, folderName string,
) *common.Sanitree[models.MailFolderable] {
gcc, err := ac.Mail().GetContainerByName(
ctx,
userID,
api.MsgFolderRoot,
folderName)
if err != nil {
common.Fatal(ctx, "getting mail subfolders", err)
common.Fatal(
ctx,
fmt.Sprintf("finding folder by name %q", folderName),
err)
}
for _, child := range childFolder.GetValue() {
var (
childDisplayName = ptr.Val(child.GetDisplayName())
childFolderCount = ptr.Val(child.GetChildFolderCount())
//nolint:forbidigo
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
mmf, ok := gcc.(models.MailFolderable)
if !ok {
common.Fatal(
ctx,
"mail folderable required",
clues.New("casting "+*gcc.GetDisplayName()+" to models.MailFolderable"))
}
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
// recursively check for subfolders
if childFolderCount > 0 {
parentFolder := fullFolderName
root := &common.Sanitree[models.MailFolderable]{
Container: mmf,
ContainerID: ptr.Val(mmf.GetId()),
ContainerName: ptr.Val(mmf.GetDisplayName()),
ContainsItems: int(ptr.Val(mmf.GetTotalItemCount())),
Children: map[string]*common.Sanitree[models.MailFolderable]{},
}
getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
}
}
}
}
// checkAllSubFolder will recursively traverse inside the restore folder and
// verify that data matched in all subfolders
func checkAllSubFolder(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
r models.MailFolderable,
testUser,
parentFolder,
dataFolder string,
restoreMessageCount map[string]int32,
) {
var (
folderID = ptr.Val(r.GetId())
count int32 = 99
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
Top: &count,
},
}
)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, options)
if err != nil {
common.Fatal(ctx, "getting mail subfolders", err)
}
for _, child := range childFolder.GetValue() {
var (
childDisplayName = ptr.Val(child.GetDisplayName())
//nolint:forbidigo
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
restoreMessageCount[fullFolderName] = childTotalCount
}
childFolderCount := ptr.Val(child.GetChildFolderCount())
if childFolderCount > 0 {
parentFolder := fullFolderName
checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
recurseSubfolders(ctx, ac, root, userID)
return root
}
func recurseSubfolders(
ctx context.Context,
ac api.Client,
parent *common.Sanitree[models.MailFolderable],
userID string,
) {
childFolders, err := ac.Mail().GetContainerChildren(
ctx,
userID,
parent.ContainerID)
if err != nil {
common.Fatal(ctx, "getting subfolders", err)
}
for _, child := range childFolders {
c := &common.Sanitree[models.MailFolderable]{
Container: child,
ContainerID: ptr.Val(child.GetId()),
ContainerName: ptr.Val(child.GetDisplayName()),
ContainsItems: int(ptr.Val(child.GetTotalItemCount())),
Children: map[string]*common.Sanitree[models.MailFolderable]{},
}
parent.Children[c.ContainerName] = c
if ptr.Val(child.GetChildFolderCount()) > 0 {
recurseSubfolders(ctx, ac, c, userID)
}
}
}

View File

@ -0,0 +1,16 @@
package restore
import (
"context"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
func CheckGroupsRestoration(
ctx context.Context,
ac api.Client,
envs common.Envs,
) {
// TODO
}

View File

@ -7,12 +7,12 @@ import (
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
const (
@ -21,34 +21,29 @@ const (
func CheckOneDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
userID, folderName, dataFolder string,
startTime time.Time,
ac api.Client,
envs common.Envs,
) {
drive, err := client.
Users().
ByUserId(userID).
Drive().
Get(ctx, nil)
drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
checkDriveRestoration(
ctx,
client,
ac,
path.OneDriveService,
folderName,
envs.FolderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
envs.DataFolder,
envs.StartTime,
false)
}
func checkDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
service path.ServiceType,
folderName,
driveID,
@ -70,7 +65,7 @@ func checkDriveRestoration(
restoreFolderID := PopulateDriveDetails(
ctx,
client,
ac,
driveID,
folderName,
dataFolder,
@ -78,7 +73,14 @@ func checkDriveRestoration(
folderPermissions,
startTime)
getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime)
getRestoredDrive(
ctx,
ac,
driveID,
restoreFolderID,
restoreFile,
restoredFolderPermissions,
startTime)
checkRestoredDriveItemPermissions(
ctx,
@ -105,7 +107,7 @@ func checkDriveRestoration(
func PopulateDriveDetails(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, folderName, dataFolder string,
fileSizes map[string]int64,
folderPermissions map[string][]common.PermissionInfo,
@ -113,18 +115,12 @@ func PopulateDriveDetails(
) string {
var restoreFolderID string
response, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId("root").
Children().
Get(ctx, nil)
children, err := ac.Drives().GetFolderChildren(ctx, driveID, "root")
if err != nil {
common.Fatal(ctx, "getting drive by id", err)
}
for _, driveItem := range response.GetValue() {
for _, driveItem := range children {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
@ -156,8 +152,17 @@ func PopulateDriveDetails(
continue
}
folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime)
folderPermissions[itemName] = permissionIn(ctx, ac, driveID, itemID)
getOneDriveChildFolder(
ctx,
ac,
driveID,
itemID,
itemName,
fileSizes,
folderPermissions,
startTime)
}
return restoreFolderID
@ -228,18 +233,18 @@ func checkRestoredDriveItemPermissions(
func getOneDriveChildFolder(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, itemID, parentName string,
fileSizes map[string]int64,
folderPermission map[string][]common.PermissionInfo,
startTime time.Time,
) {
response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil)
children, err := ac.Drives().GetFolderChildren(ctx, driveID, itemID)
if err != nil {
common.Fatal(ctx, "getting child folder", err)
}
for _, driveItem := range response.GetValue() {
for _, driveItem := range children {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
@ -268,31 +273,33 @@ func getOneDriveChildFolder(
continue
}
folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
folderPermission[fullName] = permissionIn(ctx, ac, driveID, itemID)
getOneDriveChildFolder(
ctx,
ac,
driveID,
itemID,
fullName,
fileSizes,
folderPermission,
startTime)
}
}
func getRestoredDrive(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, restoreFolderID string,
restoreFile map[string]int64,
restoreFolder map[string][]common.PermissionInfo,
startTime time.Time,
) {
restored, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(restoreFolderID).
Children().
Get(ctx, nil)
children, err := ac.Drives().GetFolderChildren(ctx, driveID, restoreFolderID)
if err != nil {
common.Fatal(ctx, "getting child folder", err)
}
for _, item := range restored.GetValue() {
for _, item := range children {
var (
itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName())
@ -308,8 +315,16 @@ func getRestoredDrive(
continue
}
restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime)
restoreFolder[itemName] = permissionIn(ctx, ac, driveID, itemID)
getOneDriveChildFolder(
ctx,
ac,
driveID,
itemID,
itemName,
restoreFile,
restoreFolder,
startTime)
}
}
@ -319,18 +334,12 @@ func getRestoredDrive(
func permissionIn(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
ac api.Client,
driveID, itemID string,
) []common.PermissionInfo {
pi := []common.PermissionInfo{}
pcr, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Permissions().
Get(ctx, nil)
pcr, err := ac.Drives().GetItemPermission(ctx, driveID, itemID)
if err != nil {
common.Fatal(ctx, "getting permission", err)
}

View File

@ -2,38 +2,31 @@ package restore
import (
"context"
"time"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
func CheckSharePointRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
siteID, userID, folderName, dataFolder string,
startTime time.Time,
ac api.Client,
envs common.Envs,
) {
drive, err := client.
Sites().
BySiteId(siteID).
Drive().
Get(ctx, nil)
drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
checkDriveRestoration(
ctx,
client,
ac,
path.SharePointService,
folderName,
envs.FolderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
envs.DataFolder,
envs.StartTime,
true)
}

View File

@ -2,21 +2,40 @@ package main
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/spf13/cobra"
"github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/export"
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/logger"
)
// ---------------------------------------------------------------------------
// root command
// ---------------------------------------------------------------------------
func rootCMD() *cobra.Command {
return &cobra.Command{
Use: "sanity-test",
Short: "run the sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRoot,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
fmt.Println("running", cmd.UseLine())
},
}
}
func sanityTestRoot(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a kind of test"))
}
func main() {
ls := logger.Settings{
File: logger.GetLogFile(""),
@ -29,60 +48,226 @@ func main() {
_ = log.Sync() // flush all logs in the buffer
}()
// TODO: only needed for exchange
graph.InitializeConcurrencyLimiter(ctx, true, 4)
adapter, err := graph.CreateAdapter(
tconfig.GetM365TenantID(ctx),
os.Getenv("AZURE_CLIENT_ID"),
os.Getenv("AZURE_CLIENT_SECRET"))
if err != nil {
common.Fatal(ctx, "creating adapter", err)
}
root := rootCMD()
var (
client = msgraphsdk.NewGraphServiceClient(adapter)
testUser = tconfig.GetM365UserID(ctx)
testSite = tconfig.GetM365SiteID(ctx)
testKind = os.Getenv("SANITY_TEST_KIND") // restore or export (cli arg?)
testService = os.Getenv("SANITY_TEST_SERVICE")
folder = strings.TrimSpace(os.Getenv("SANITY_TEST_FOLDER"))
dataFolder = os.Getenv("TEST_DATA")
baseBackupFolder = os.Getenv("BASE_BACKUP")
)
restCMD := restoreCMD()
ctx = clues.Add(
ctx,
"resource_owner", testUser,
"service", testService,
"sanity_restore_folder", folder)
restCMD.AddCommand(restoreExchangeCMD())
restCMD.AddCommand(restoreOneDriveCMD())
restCMD.AddCommand(restoreSharePointCMD())
restCMD.AddCommand(restoreGroupsCMD())
root.AddCommand(restCMD)
logger.Ctx(ctx).Info("starting sanity test check")
expCMD := exportCMD()
switch testKind {
case "restore":
startTime, _ := common.MustGetTimeFromName(ctx, folder)
clues.Add(ctx, "sanity_restore_start_time", startTime.Format(time.RFC3339))
expCMD.AddCommand(exportOneDriveCMD())
expCMD.AddCommand(exportSharePointCMD())
expCMD.AddCommand(exportGroupsCMD())
root.AddCommand(expCMD)
switch testService {
case "exchange":
restore.CheckEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
case "onedrive":
restore.CheckOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
case "sharepoint":
restore.CheckSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
default:
common.Fatal(ctx, "unknown service for restore sanity tests", nil)
}
case "export":
switch testService {
case "onedrive":
export.CheckOneDriveExport(ctx, client, testUser, folder, dataFolder)
case "sharepoint":
export.CheckSharePointExport(ctx, client, testSite, folder, dataFolder)
default:
common.Fatal(ctx, "unknown service for export sanity tests", nil)
}
default:
common.Fatal(ctx, "unknown test kind (expected restore or export)", nil)
if err := root.Execute(); err != nil {
os.Exit(1)
}
}
// ---------------------------------------------------------------------------
// restore/export command
// ---------------------------------------------------------------------------
func exportCMD() *cobra.Command {
return &cobra.Command{
Use: "restore",
Short: "run the post-export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExport,
}
}
func sanityTestExport(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a service"))
}
func restoreCMD() *cobra.Command {
return &cobra.Command{
Use: "restore",
Short: "run the post-restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestore,
}
}
func sanityTestRestore(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a service"))
}
// ---------------------------------------------------------------------------
// service commands - export
// ---------------------------------------------------------------------------
func exportGroupsCMD() *cobra.Command {
return &cobra.Command{
Use: "groups",
Short: "run the groups export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportGroups,
}
}
func sanityTestExportGroups(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
export.CheckGroupsExport(ctx, ac, envs)
return nil
}
func exportOneDriveCMD() *cobra.Command {
return &cobra.Command{
Use: "onedrive",
Short: "run the onedrive export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportOneDrive,
}
}
func sanityTestExportOneDrive(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
export.CheckOneDriveExport(ctx, ac, envs)
return nil
}
func exportSharePointCMD() *cobra.Command {
return &cobra.Command{
Use: "sharepoint",
Short: "run the sharepoint export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportSharePoint,
}
}
func sanityTestExportSharePoint(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
export.CheckSharePointExport(ctx, ac, envs)
return nil
}
// ---------------------------------------------------------------------------
// service commands - restore
// ---------------------------------------------------------------------------
func restoreExchangeCMD() *cobra.Command {
return &cobra.Command{
Use: "exchange",
Short: "run the exchange restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreExchange,
}
}
func sanityTestRestoreExchange(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckEmailRestoration(ctx, ac, envs)
return nil
}
func restoreOneDriveCMD() *cobra.Command {
return &cobra.Command{
Use: "onedrive",
Short: "run the onedrive restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreOneDrive,
}
}
func sanityTestRestoreOneDrive(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckOneDriveRestoration(ctx, ac, envs)
return nil
}
func restoreSharePointCMD() *cobra.Command {
return &cobra.Command{
Use: "sharepoint",
Short: "run the sharepoint restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreSharePoint,
}
}
func sanityTestRestoreSharePoint(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckSharePointRestoration(ctx, ac, envs)
return nil
}
func restoreGroupsCMD() *cobra.Command {
return &cobra.Command{
Use: "groups",
Short: "run the groups restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreGroups,
}
}
func sanityTestRestoreGroups(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckGroupsRestoration(ctx, ac, envs)
return nil
}
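
Wired together, the tree resolves an invocation such as sanity-test restore exchange; the equivalent in-process sketch:

root := rootCMD()

rest := restoreCMD()
rest.AddCommand(restoreExchangeCMD())
root.AddCommand(rest)

root.SetArgs([]string{"restore", "exchange"})

if err := root.Execute(); err != nil {
    os.Exit(1)
}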

View File

@ -8,7 +8,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1
github.com/alcionai/clues v0.0.0-20230920212840-728ac1a1d8b8
github.com/armon/go-metrics v0.4.1
github.com/aws/aws-xray-sdk-go v1.8.1
github.com/aws/aws-xray-sdk-go v1.8.2
github.com/cenkalti/backoff/v4 v4.2.1
github.com/google/uuid v1.3.1
github.com/h2non/gock v1.2.0

View File

@ -71,8 +71,8 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc=
github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/aws/aws-xray-sdk-go v1.8.2 h1:PVxNWnQG+rAYjxsmhEN97DTO57Dipg6VS0wsu6bXUB0=
github.com/aws/aws-xray-sdk-go v1.8.2/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=

View File

@ -0,0 +1,187 @@
package readers
import (
"bytes"
"encoding/binary"
"io"
"os"
"unsafe"
"github.com/alcionai/clues"
)
// persistedSerializationVersion is the on-disk representation of the
// serialization version.
//
// The field is written in big endian. The highest bit denotes that the item is
// empty because it was deleted between the time we told the storage about it
// and when we needed to get data for it. The lowest two bytes are the version
// number. All other bits are reserved for future use.
//
// MSB 31         30 ... 16       15 ... 0  LSB
//     +----------+-----------+----------------+
//     | del flag |  reserved | version number |
//     +----------+-----------+----------------+
type persistedSerializationVersion = uint32
// SerializationVersion is the in-memory representation of the version number
// that gets encoded into the persisted serialization version.
//
// Right now it's only a uint16 but we can expand it to be larger so long as the
// expanded size doesn't clash with the flags in the high-order bits.
type SerializationVersion uint16
// DefaultSerializationVersion is the current (default) version number for all
// services. As services evolve their storage format they should begin tracking
// their own version numbers separate from other services.
const DefaultSerializationVersion SerializationVersion = 1
const (
VersionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0)))
delInFlightMask persistedSerializationVersion = 1 << ((VersionFormatSize * 8) - 1)
)
// SerializationFormat is a struct describing serialization format versions and
// flags to add for this item.
type SerializationFormat struct {
Version SerializationVersion
DelInFlight bool
}
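
To make the bit layout concrete, packing and unpacking a version word looks like this (a standalone sketch using the masks and sizes defined above):

// pack: version 42, deleted in flight
v := persistedSerializationVersion(42) | delInFlightMask

buf := make([]byte, VersionFormatSize)
binary.BigEndian.PutUint32(buf, v) // buf is now {0x80, 0x00, 0x00, 0x2a}

// unpack
raw := binary.BigEndian.Uint32(buf)
version := SerializationVersion(raw) // conversion keeps only the low 16 bits
deleted := raw&delInFlightMask != 0  // true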
// NewVersionedBackupReader creates a reader that injects format into the first
// bytes of the returned data. After format has been returned, data is returned
// from baseReaders in the order they're passed in.
func NewVersionedBackupReader(
format SerializationFormat,
baseReaders ...io.ReadCloser,
) (io.ReadCloser, error) {
if format.DelInFlight && len(baseReaders) > 0 {
// This is a conservative check, but we can always loosen it later on if
// needed. At the moment we really don't expect any data if the item was
// deleted.
return nil, clues.New("item marked deleted but has reader(s)")
}
formattedVersion := persistedSerializationVersion(format.Version)
if format.DelInFlight {
formattedVersion |= delInFlightMask
}
formattedBuf := make([]byte, VersionFormatSize)
binary.BigEndian.PutUint32(formattedBuf, formattedVersion)
versionReader := io.NopCloser(bytes.NewReader(formattedBuf))
// Need to add readers individually because types differ.
allReaders := make([]io.Reader, 0, len(baseReaders)+1)
allReaders = append(allReaders, versionReader)
for _, r := range baseReaders {
allReaders = append(allReaders, r)
}
res := &versionedBackupReader{
baseReaders: append([]io.ReadCloser{versionReader}, baseReaders...),
combined: io.MultiReader(allReaders...),
}
return res, nil
}
type versionedBackupReader struct {
// baseReaders is a reference to the original readers so we can close them.
baseReaders []io.ReadCloser
// combined is the reader that will return all data.
combined io.Reader
}
func (vbr *versionedBackupReader) Read(p []byte) (int, error) {
if vbr.combined == nil {
return 0, os.ErrClosed
}
n, err := vbr.combined.Read(p)
if err == io.EOF {
// Golang doesn't allow wrapping of EOF. If we wrap it other things start
// thinking it's an actual error.
return n, err
}
return n, clues.Stack(err).OrNil()
}
func (vbr *versionedBackupReader) Close() error {
if vbr.combined == nil {
return nil
}
vbr.combined = nil
var errs *clues.Err
for i, r := range vbr.baseReaders {
if err := r.Close(); err != nil {
errs = clues.Stack(
errs,
clues.Wrap(err, "closing reader").With("reader_index", i))
}
}
vbr.baseReaders = nil
return errs.OrNil()
}
// NewVersionedRestoreReader wraps baseReader and provides easy access to the
// SerializationFormat info in the first bytes of the data contained in
// baseReader.
func NewVersionedRestoreReader(
baseReader io.ReadCloser,
) (*VersionedRestoreReader, error) {
versionBuf := make([]byte, VersionFormatSize)
// Loop to account for the unlikely case where we get a short read.
for read := 0; read < VersionFormatSize; {
n, err := baseReader.Read(versionBuf[read:])
if err != nil {
return nil, clues.Wrap(err, "reading serialization version")
}
read += n
}
formattedVersion := binary.BigEndian.Uint32(versionBuf)
return &VersionedRestoreReader{
baseReader: baseReader,
format: SerializationFormat{
Version: SerializationVersion(formattedVersion),
DelInFlight: (formattedVersion & delInFlightMask) != 0,
},
}, nil
}
type VersionedRestoreReader struct {
baseReader io.ReadCloser
format SerializationFormat
}
func (vrr *VersionedRestoreReader) Read(p []byte) (int, error) {
n, err := vrr.baseReader.Read(p)
if err == io.EOF {
// Golang doesn't allow wrapping of EOF. If we wrap it other things start
// thinking it's an actual error.
return n, err
}
return n, clues.Stack(err).OrNil()
}
func (vrr *VersionedRestoreReader) Close() error {
return clues.Stack(vrr.baseReader.Close()).OrNil()
}
func (vrr VersionedRestoreReader) Format() SerializationFormat {
return vrr.format
}
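
Round-tripping an item through both readers (a self-contained sketch):

payload := []byte("item bytes")

br, err := readers.NewVersionedBackupReader(
    readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
    io.NopCloser(bytes.NewReader(payload)))
if err != nil {
    log.Fatal(err)
}

rr, err := readers.NewVersionedRestoreReader(br)
if err != nil {
    log.Fatal(err)
}

fmt.Println(rr.Format().Version, rr.Format().DelInFlight) // 1 false

data, err := io.ReadAll(rr) // the version header is already consumed
if err != nil {
    log.Fatal(err)
}

fmt.Println(string(data)) // item bytes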

View File

@ -0,0 +1,362 @@
package readers_test
import (
"bytes"
"io"
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/tester"
)
type shortReader struct {
maxReadLen int
io.ReadCloser
}
func (s *shortReader) Read(p []byte) (int, error) {
toRead := s.maxReadLen
if len(p) < toRead {
toRead = len(p)
}
return s.ReadCloser.Read(p[:toRead])
}
type SerializationReaderUnitSuite struct {
tester.Suite
}
func TestSerializationReaderUnitSuite(t *testing.T) {
suite.Run(t, &SerializationReaderUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader() {
baseData := []byte("hello world")
table := []struct {
name string
format readers.SerializationFormat
inputReaders []io.ReadCloser
expectErr require.ErrorAssertionFunc
expectData []byte
}{
{
name: "DeletedInFlight NoVersion NoReaders",
format: readers.SerializationFormat{
DelInFlight: true,
},
expectErr: require.NoError,
expectData: []byte{0x80, 0x0, 0x0, 0x0},
},
{
name: "DeletedInFlight NoReaders",
format: readers.SerializationFormat{
Version: 42,
DelInFlight: true,
},
expectErr: require.NoError,
expectData: []byte{0x80, 0x0, 0x0, 42},
},
{
name: "NoVersion NoReaders",
expectErr: require.NoError,
expectData: []byte{0x00, 0x0, 0x0, 0x0},
},
{
name: "NoReaders",
format: readers.SerializationFormat{
Version: 42,
},
expectErr: require.NoError,
expectData: []byte{0x00, 0x0, 0x0, 42},
},
{
name: "SingleReader",
format: readers.SerializationFormat{
Version: 42,
},
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
expectErr: require.NoError,
expectData: append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
},
{
name: "MultipleReaders",
format: readers.SerializationFormat{
Version: 42,
},
inputReaders: []io.ReadCloser{
io.NopCloser(bytes.NewReader(baseData)),
io.NopCloser(bytes.NewReader(baseData)),
},
expectErr: require.NoError,
expectData: append(
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
baseData...),
},
// Uncomment if we expand the version to 32 bits.
//{
// name: "VersionWithHighBitSet NoReaders Errors",
// format: readers.SerializationFormat{
// Version: 0x80000000,
// },
// expectErr: require.Error,
//},
{
name: "DeletedInFlight SingleReader Errors",
format: readers.SerializationFormat{
DelInFlight: true,
},
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
expectErr: require.Error,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
r, err := readers.NewVersionedBackupReader(
test.format,
test.inputReaders...)
test.expectErr(t, err, "getting backup reader: %v", clues.ToCore(err))
if err != nil {
return
}
defer func() {
err := r.Close()
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
}()
buf, err := io.ReadAll(r)
require.NoError(
t,
err,
"reading serialized data: %v",
clues.ToCore(err))
// Need to use equal because output is order-sensitive.
assert.Equal(t, test.expectData, buf, "serialized data")
})
}
}
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader_ShortReads() {
t := suite.T()
baseData := []byte("hello world")
expectData := append(
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
baseData...)
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)),
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "getting backup reader: %v", clues.ToCore(err))
defer func() {
err := r.Close()
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
}()
buf := make([]byte, len(expectData))
r = &shortReader{
maxReadLen: 3,
ReadCloser: r,
}
for read := 0; ; {
n, err := r.Read(buf[read:])
read += n
if read >= len(buf) {
break
}
require.NoError(t, err, "reading data: %v", clues.ToCore(err))
}
// Need to use equal because output is order-sensitive.
assert.Equal(t, expectData, buf, "serialized data")
}
// TestRestoreSerializationReader checks that we can read previously serialized
// data. For simplicity, it uses the versionedBackupReader to generate the
// input. This should be relatively safe because the tests for
// versionedBackupReader do compare directly against serialized data.
func (suite *SerializationReaderUnitSuite) TestRestoreSerializationReader() {
baseData := []byte("hello world")
table := []struct {
name string
inputReader func(*testing.T) io.ReadCloser
expectErr require.ErrorAssertionFunc
expectVersion readers.SerializationVersion
expectDelInFlight bool
expectData []byte
}{
{
name: "NoVersion NoReaders",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(readers.SerializationFormat{})
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectData: []byte{},
},
{
name: "DeletedInFlight NoReaders",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{
Version: 42,
DelInFlight: true,
})
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectDelInFlight: true,
expectData: []byte{},
},
{
name: "DeletedInFlight SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
// Need to specify the bytes manually because the backup reader won't
// allow creating something with the deleted flag and data.
return io.NopCloser(bytes.NewReader(append(
[]byte{0x80, 0x0, 0x0, 42},
baseData...)))
},
expectErr: require.NoError,
expectVersion: 42,
expectDelInFlight: true,
expectData: baseData,
},
{
name: "NoVersion SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{},
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectData: baseData,
},
{
name: "SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectData: baseData,
},
{
name: "ShortReads SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
r = &shortReader{
maxReadLen: 3,
ReadCloser: r,
}
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectData: baseData,
},
{
name: "MultipleReaders",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)),
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectData: append(slices.Clone(baseData), baseData...),
},
{
name: "EmptyReader Errors",
inputReader: func(t *testing.T) io.ReadCloser {
return io.NopCloser(bytes.NewReader([]byte{}))
},
expectErr: require.Error,
},
{
name: "TruncatedVersion Errors",
inputReader: func(t *testing.T) io.ReadCloser {
return io.NopCloser(bytes.NewReader([]byte{0x80, 0x0}))
},
expectErr: require.Error,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
r, err := readers.NewVersionedRestoreReader(test.inputReader(t))
test.expectErr(t, err, "getting restore reader: %v", clues.ToCore(err))
if err != nil {
return
}
defer func() {
err := r.Close()
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
}()
assert.Equal(
t,
test.expectVersion,
r.Format().Version,
"version")
assert.Equal(
t,
test.expectDelInFlight,
r.Format().DelInFlight,
"deleted in flight")
buf, err := io.ReadAll(r)
require.NoError(t, err, "reading serialized data: %v", clues.ToCore(err))
// Need to use equal because output is order-sensitive.
assert.Equal(t, test.expectData, buf, "serialized data")
})
}
}

View File

@ -1,30 +1,38 @@
package data
import (
"bytes"
"context"
"io"
"sync"
"time"
"github.com/alcionai/clues"
"github.com/spatialcurrent/go-lazy/pkg/lazy"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
)
var (
_ Item = &unindexedPrefetchedItem{}
_ ItemModTime = &unindexedPrefetchedItem{}
_ Item = &prefetchedItem{}
_ ItemInfo = &prefetchedItem{}
_ ItemModTime = &prefetchedItem{}
_ Item = &unindexedLazyItem{}
_ ItemModTime = &unindexedLazyItem{}
_ Item = &lazyItem{}
_ ItemInfo = &lazyItem{}
_ ItemModTime = &lazyItem{}
)
func NewDeletedItem(itemID string) Item {
return &prefetchedItem{
return &unindexedPrefetchedItem{
id: itemID,
deleted: true,
// TODO(ashmrtn): This really doesn't need to be set since deleted items are
@ -34,24 +42,33 @@ func NewDeletedItem(itemID string) Item {
}
}
func NewPrefetchedItem(
func NewUnindexedPrefetchedItem(
reader io.ReadCloser,
itemID string,
info details.ItemInfo,
) Item {
return &prefetchedItem{
id: itemID,
reader: reader,
info: info,
modTime: info.Modified(),
modTime time.Time,
) (*unindexedPrefetchedItem, error) {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
reader)
if err != nil {
return nil, clues.Stack(err)
}
return &unindexedPrefetchedItem{
id: itemID,
reader: r,
modTime: modTime,
}, nil
}
// prefetchedItem represents a single item retrieved from the remote service.
type prefetchedItem struct {
// unindexedPrefetchedItem represents a single item retrieved from the remote
// service.
//
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedPrefetchedItem struct {
id string
reader io.ReadCloser
info details.ItemInfo
// modTime is the modified time of the item. It should match the modTime in
// info if info is present. Here as a separate field so that deleted items
// don't error out by trying to source it from info.
@ -62,26 +79,51 @@ type prefetchedItem struct {
deleted bool
}
func (i prefetchedItem) ID() string {
func (i unindexedPrefetchedItem) ID() string {
return i.id
}
func (i *prefetchedItem) ToReader() io.ReadCloser {
func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser {
return i.reader
}
func (i prefetchedItem) Deleted() bool {
func (i unindexedPrefetchedItem) Deleted() bool {
return i.deleted
}
func (i unindexedPrefetchedItem) ModTime() time.Time {
return i.modTime
}
func NewPrefetchedItem(
reader io.ReadCloser,
itemID string,
info details.ItemInfo,
) (*prefetchedItem, error) {
inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified())
if err != nil {
return nil, clues.Stack(err)
}
return &prefetchedItem{
unindexedPrefetchedItem: inner,
info: info,
}, nil
}
// prefetchedItem represents a single item retrieved from the remote service.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type prefetchedItem struct {
*unindexedPrefetchedItem
info details.ItemInfo
}
func (i prefetchedItem) Info() (details.ItemInfo, error) {
return i.info, nil
}
func (i prefetchedItem) ModTime() time.Time {
return i.modTime
}
type ItemDataGetter interface {
GetData(
context.Context,
@ -89,14 +131,14 @@ type ItemDataGetter interface {
) (io.ReadCloser, *details.ItemInfo, bool, error)
}
func NewLazyItem(
func NewUnindexedLazyItem(
ctx context.Context,
itemGetter ItemDataGetter,
itemID string,
modTime time.Time,
errs *fault.Bus,
) Item {
return &lazyItem{
) *unindexedLazyItem {
return &unindexedLazyItem{
ctx: ctx,
id: itemID,
itemGetter: itemGetter,
@ -105,11 +147,15 @@ func NewLazyItem(
}
}
// lazyItem represents a single item retrieved from the remote service. It
// lazily fetches the item's data when the first call to ToReader().Read() is
// unindexedLazyItem represents a single item retrieved from the remote service.
// It lazily fetches the item's data when the first call to ToReader().Read() is
// made.
type lazyItem struct {
//
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedLazyItem struct {
ctx context.Context
mu sync.Mutex
id string
errs *fault.Bus
itemGetter ItemDataGetter
@ -127,17 +173,27 @@ type lazyItem struct {
delInFlight bool
}
func (i lazyItem) ID() string {
func (i *unindexedLazyItem) ID() string {
return i.id
}
func (i *lazyItem) ToReader() io.ReadCloser {
func (i *unindexedLazyItem) ToReader() io.ReadCloser {
return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
// Don't allow getting Item info while trying to initialize said info.
// GetData could be a long running call, but in theory nothing should happen
// with the item until a reader is returned anyway.
i.mu.Lock()
defer i.mu.Unlock()
reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.errs)
if err != nil {
return nil, clues.Stack(err)
}
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
// If an item was deleted then return an empty file so we don't fail the
// backup and return a sentinel error when asked for ItemInfo so we don't
// display the item in the backup.
@ -149,21 +205,59 @@ func (i *lazyItem) ToReader() io.ReadCloser {
logger.Ctx(i.ctx).Info("item not found")
i.delInFlight = true
format.DelInFlight = true
r, err := readers.NewVersionedBackupReader(format)
return io.NopCloser(bytes.NewReader([]byte{})), nil
return r, clues.Stack(err).OrNil()
}
i.info = info
return reader, nil
r, err := readers.NewVersionedBackupReader(format, reader)
return r, clues.Stack(err).OrNil()
})
}
func (i lazyItem) Deleted() bool {
func (i *unindexedLazyItem) Deleted() bool {
return false
}
func (i lazyItem) Info() (details.ItemInfo, error) {
func (i *unindexedLazyItem) ModTime() time.Time {
return i.modTime
}
func NewLazyItem(
ctx context.Context,
itemGetter ItemDataGetter,
itemID string,
modTime time.Time,
errs *fault.Bus,
) *lazyItem {
return &lazyItem{
unindexedLazyItem: NewUnindexedLazyItem(
ctx,
itemGetter,
itemID,
modTime,
errs),
}
}
// lazyItem represents a single item retrieved from the remote service. It
// lazily fetches the item's data when the first call to ToReader().Read() is
// made.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type lazyItem struct {
*unindexedLazyItem
}
func (i *lazyItem) Info() (details.ItemInfo, error) {
i.mu.Lock()
defer i.mu.Unlock()
if i.delInFlight {
return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx)
} else if i.info == nil {
@ -173,7 +267,3 @@ func (i lazyItem) Info() (details.ItemInfo, error) {
return *i.info, nil
}
func (i lazyItem) ModTime() time.Time {
return i.modTime
}
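
Since every item reader now carries the version header, consumers unwrap it with readers.NewVersionedRestoreReader before touching the payload (a sketch; body and info are stand-ins):

item, err := data.NewPrefetchedItem(
    io.NopCloser(bytes.NewReader(body)),
    "item-id",
    info)
if err != nil {
    return err
}

rr, err := readers.NewVersionedRestoreReader(item.ToReader())
if err != nil {
    return err
}

raw, err := io.ReadAll(rr) // payload only, header already stripped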

View File

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/backup/details"
@ -49,6 +50,38 @@ func TestItemUnitSuite(t *testing.T) {
suite.Run(t, &ItemUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
prefetch, err := data.NewUnindexedPrefetchedItem(
io.NopCloser(bytes.NewReader([]byte{})),
"foo",
time.Time{})
require.NoError(suite.T(), err, clues.ToCore(err))
var item data.Item = prefetch
_, ok := item.(data.ItemInfo)
assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()")
}
func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
lazy := data.NewUnindexedLazyItem(
ctx,
nil,
"foo",
time.Time{},
fault.New(true))
var item data.Item = lazy
_, ok := item.(data.ItemInfo)
assert.False(t, ok, "unindexedLazyItem implements Info()")
}
func (suite *ItemUnitSuite) TestDeletedItem() {
var (
t = suite.T()
@ -115,18 +148,29 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() {
suite.Run(test.name, func() {
t := suite.T()
item := data.NewPrefetchedItem(test.reader, id, test.info)
item, err := data.NewPrefetchedItem(test.reader, id, test.info)
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, id, item.ID(), "ID")
assert.False(t, item.Deleted(), "deleted")
assert.Equal(
t,
test.info.Modified(),
item.(data.ItemModTime).ModTime(),
item.ModTime(),
"mod time")
readData, err := io.ReadAll(item.ToReader())
test.readErr(t, err, clues.ToCore(err), "read error")
r, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, "version error: %v", clues.ToCore(err))
if err != nil {
return
}
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
test.readErr(t, err, "read error: %v", clues.ToCore(err))
assert.Equal(t, test.expectData, readData, "read data")
})
}
@ -169,6 +213,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
table := []struct {
name string
mid *mockItemDataGetter
versionErr assert.ErrorAssertionFunc
readErr assert.ErrorAssertionFunc
infoErr assert.ErrorAssertionFunc
expectData []byte
@ -180,6 +225,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
reader: io.NopCloser(bytes.NewReader([]byte{})),
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
},
versionErr: assert.NoError,
readErr: assert.NoError,
infoErr: assert.NoError,
expectData: []byte{},
@ -190,6 +236,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
reader: io.NopCloser(bytes.NewReader(baseData)),
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
},
versionErr: assert.NoError,
readErr: assert.NoError,
infoErr: assert.NoError,
expectData: baseData,
@ -200,6 +247,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
reader: io.NopCloser(bytes.NewReader(baseData)),
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
},
versionErr: assert.NoError,
readErr: assert.NoError,
infoErr: assert.NoError,
expectData: baseData,
@ -209,6 +257,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
mid: &mockItemDataGetter{
err: assert.AnError,
},
versionErr: assert.Error,
readErr: assert.Error,
infoErr: assert.Error,
expectData: []byte{},
@ -224,6 +273,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
},
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
},
versionErr: assert.NoError,
readErr: assert.Error,
infoErr: assert.NoError,
expectData: baseData[:5],
@ -253,15 +303,25 @@ func (suite *ItemUnitSuite) TestLazyItem() {
assert.Equal(
t,
now,
item.(data.ItemModTime).ModTime(),
item.ModTime(),
"mod time")
// Read data to execute lazy reader.
readData, err := io.ReadAll(item.ToReader())
r, err := readers.NewVersionedRestoreReader(item.ToReader())
test.versionErr(t, err, "version error: %v", clues.ToCore(err))
if err != nil {
return
}
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
test.readErr(t, err, clues.ToCore(err), "read error")
assert.Equal(t, test.expectData, readData, "read data")
_, err = item.(data.ItemInfo).Info()
_, err = item.Info()
test.infoErr(t, err, "Info(): %v", clues.ToCore(err))
e := errs.Errors()
@ -301,15 +361,21 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
assert.Equal(
t,
now,
item.(data.ItemModTime).ModTime(),
item.ModTime(),
"mod time")
// Read data to execute lazy reader.
readData, err := io.ReadAll(item.ToReader())
r, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, "version error: %v", clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.True(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
require.NoError(t, err, clues.ToCore(err), "read error")
assert.Empty(t, readData, "read data")
_, err = item.(data.ItemInfo).Info()
_, err = item.Info()
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
e := errs.Errors()
@ -341,9 +407,9 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
assert.Equal(
t,
now,
item.(data.ItemModTime).ModTime(),
item.ModTime(),
"mod time")
_, err := item.(data.ItemInfo).Info()
_, err := item.Info()
assert.Error(t, err, "Info() error")
}

View File

@ -3,8 +3,13 @@ package mock
import (
"context"
"io"
"testing"
"time"
"github.com/alcionai/clues"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
@ -163,3 +168,106 @@ func (rc RestoreCollection) FetchItemByName(
return res, nil
}
var (
_ data.BackupCollection = &versionedBackupCollection{}
_ data.RestoreCollection = &unversionedRestoreCollection{}
_ data.Item = &itemWrapper{}
)
type itemWrapper struct {
data.Item
reader io.ReadCloser
}
func (i *itemWrapper) ToReader() io.ReadCloser {
return i.reader
}
func NewUnversionedRestoreCollection(
t *testing.T,
col data.RestoreCollection,
) *unversionedRestoreCollection {
return &unversionedRestoreCollection{
RestoreCollection: col,
t: t,
}
}
// unversionedRestoreCollection strips out version format headers on all items.
//
// Wrap data.RestoreCollections in this type if you don't need access to the
// version format header during tests and you know the item readers can't return
// an error.
type unversionedRestoreCollection struct {
data.RestoreCollection
t *testing.T
}
func (c *unversionedRestoreCollection) Items(
ctx context.Context,
errs *fault.Bus,
) <-chan data.Item {
res := make(chan data.Item)
go func() {
defer close(res)
for item := range c.RestoreCollection.Items(ctx, errs) {
r, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(c.t, err, clues.ToCore(err))
res <- &itemWrapper{
Item: item,
reader: r,
}
}
}()
return res
}
func NewVersionedBackupCollection(
t *testing.T,
col data.BackupCollection,
) *versionedBackupCollection {
return &versionedBackupCollection{
BackupCollection: col,
t: t,
}
}
// versionedBackupCollection injects basic version information on all items.
//
// Wrap data.BackupCollections in this type if you don't need to explicitly set
// the version format header during tests, aren't trying to check reader error
// cases, and aren't populating backup details.
type versionedBackupCollection struct {
data.BackupCollection
t *testing.T
}
func (c *versionedBackupCollection) Items(
ctx context.Context,
errs *fault.Bus,
) <-chan data.Item {
res := make(chan data.Item)
go func() {
defer close(res)
for item := range c.BackupCollection.Items(ctx, errs) {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
},
item.ToReader())
require.NoError(c.t, err, clues.ToCore(err))
res <- &itemWrapper{
Item: item,
reader: r,
}
}
}()
return res
}
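
A minimal usage sketch for the two wrappers above, assuming a test that already holds a backup collection bc and a restore collection rc:

// Inject the default version header on every item headed into kopia.
vbc := NewVersionedBackupCollection(t, bc)

// Strip the header from every item read back so assertions can compare
// raw bytes.
uvc := NewUnversionedRestoreCollection(t, rc)

for item := range uvc.Items(ctx, fault.New(true)) {
	raw, err := io.ReadAll(item.ToReader()) // header already consumed
	require.NoError(t, err, clues.ToCore(err))
	assert.NotEmpty(t, raw)
}

_ = vbc // handed to ConsumeBackupCollections in the integration suites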

View File

@ -205,7 +205,7 @@ func (w *conn) commonConnect(
bst,
password,
kopiaOpts); err != nil {
return clues.Wrap(err, "connecting to repo").WithClues(ctx)
return clues.Wrap(err, "connecting to kopia repo").WithClues(ctx)
}
if err := w.open(ctx, cfgFile, password); err != nil {
@ -580,6 +580,10 @@ func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) {
}
func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error {
if len(password) == 0 {
return clues.New("empty password provided")
}
kopiaRef := NewConn(w.storage)
if err := kopiaRef.Connect(ctx, opts); err != nil {
return clues.Wrap(err, "connecting kopia client")
@ -587,8 +591,10 @@ func (w *conn) UpdatePassword(ctx context.Context, password string, opts reposit
defer kopiaRef.Close(ctx)
repository := kopiaRef.Repository.(repo.DirectRepository)
err := repository.FormatManager().ChangePassword(ctx, password)
kopiaRepo := kopiaRef.Repository.(repo.DirectRepository)
if err := kopiaRepo.FormatManager().ChangePassword(ctx, password); err != nil {
return clues.Wrap(err, "unable to update password")
}
return errors.Wrap(err, "unable to update password")
return nil
}
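
A minimal caller-side sketch, assuming an already-connected conn w and a non-empty newPassword; the guard above now rejects blank secrets before any kopia client work happens:

if err := w.UpdatePassword(ctx, newPassword, repository.Options{}); err != nil {
	return clues.Wrap(err, "rotating kopia repo password")
}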

View File

@ -22,6 +22,20 @@ import (
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
)
func openLocalKopiaRepo(
t tester.TestT,
ctx context.Context, //revive:disable-line:context-as-argument
) (*conn, error) {
st := storeTD.NewFilesystemStorage(t)
k := NewConn(st)
if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil {
return nil, err
}
return k, nil
}
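
openLocalKopiaRepo mirrors openKopiaRepo but backs the repo with throwaway filesystem storage, keeping the integration suites below off S3. A minimal sketch of the intended test usage:

k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err))

defer k.Close(ctx)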
func openKopiaRepo(
t tester.TestT,
ctx context.Context, //revive:disable-line:context-as-argument
@ -81,7 +95,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
ctx, flush := tester.NewContext(t)
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
st := storeTD.NewFilesystemStorage(t)
k := NewConn(st)
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
@ -101,7 +115,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {
ctx, flush := tester.NewContext(t)
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
st := storeTD.NewFilesystemStorage(t)
st.Provider = storage.ProviderUnknown
k := NewConn(st)
@ -115,7 +129,7 @@ func (suite *WrapperIntegrationSuite) TestConnectWithoutInitErrors() {
ctx, flush := tester.NewContext(t)
defer flush()
st := storeTD.NewPrefixedS3Storage(t)
st := storeTD.NewFilesystemStorage(t)
k := NewConn(st)
err := k.Connect(ctx, repository.Options{})
@ -408,7 +422,7 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
Host: "bar",
}
st := storeTD.NewPrefixedS3Storage(t)
st := storeTD.NewFilesystemStorage(t)
k := NewConn(st)
err := k.Initialize(ctx, opts, repository.Retention{})

View File

@ -7,6 +7,7 @@ import (
"github.com/alcionai/clues"
"github.com/kopia/kopia/fs"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
@ -16,6 +17,7 @@ import (
var (
_ data.RestoreCollection = &kopiaDataCollection{}
_ data.Item = &kopiaDataStream{}
_ data.ItemSize = &kopiaDataStream{}
)
type kopiaDataCollection struct {
@ -23,7 +25,7 @@ type kopiaDataCollection struct {
dir fs.Directory
items []string
counter ByteCounter
expectedVersion uint32
expectedVersion readers.SerializationVersion
}
func (kdc *kopiaDataCollection) Items(
@ -102,7 +104,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
return nil, clues.New("object is not a file").WithClues(ctx)
}
size := f.Size() - int64(versionSize)
size := f.Size() - int64(readers.VersionFormatSize)
if size < 0 {
logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size)
@ -118,13 +120,32 @@ func (kdc kopiaDataCollection) FetchItemByName(
return nil, clues.Wrap(err, "opening file").WithClues(ctx)
}
// TODO(ashmrtn): Remove this when individual services implement checks for
// version and deleted items.
rr, err := readers.NewVersionedRestoreReader(r)
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
}
if rr.Format().Version != kdc.expectedVersion {
return nil, clues.New("unexpected data format").
WithClues(ctx).
With(
"read_version", rr.Format().Version,
"expected_version", kdc.expectedVersion)
}
// This is a conservative check, but we shouldn't be seeing items that were
// deleted in flight during restores because there's no way to select them.
if rr.Format().DelInFlight {
return nil, clues.New("selected item marked as deleted in flight").
WithClues(ctx)
}
return &kopiaDataStream{
id: name,
reader: &restoreStreamReader{
ReadCloser: r,
expectedVersion: kdc.expectedVersion,
},
size: size,
id: name,
reader: rr,
size: size,
}, nil
}
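
With the version check folded into FetchItemByName, callers receive a reader whose format header has already been consumed and validated; a minimal caller-side sketch, assuming a populated kopiaDataCollection col:

it, err := col.FetchItemByName(ctx, "itemName")
if err != nil {
	// Version mismatches and deleted-in-flight items now surface here
	// rather than on the first Read call.
	return clues.Stack(err)
}

raw, err := io.ReadAll(it.ToReader()) // payload only; header already stripped
if err != nil {
	return clues.Stack(err)
}

_ = raw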

View File

@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/tester"
@ -121,25 +122,35 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
)
// Needs to be a function so the readers get refreshed each time.
getLayout := func() fs.Directory {
getLayout := func(t *testing.T) fs.Directory {
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(files[0].data)))
require.NoError(t, err, clues.ToCore(err))
r2, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(files[1].data)))
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(files[0].uuid),
nil),
r: newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(files[0].data))),
size: int64(len(files[0].data) + versionSize),
r: r1,
size: int64(len(files[0].data) + readers.VersionFormatSize),
},
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(files[1].uuid),
nil),
r: newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(files[1].data))),
size: int64(len(files[1].data) + versionSize),
r: r2,
size: int64(len(files[1].data) + readers.VersionFormatSize),
},
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
@ -224,10 +235,10 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
}
c := kopiaDataCollection{
dir: getLayout(),
dir: getLayout(t),
path: nil,
items: items,
expectedVersion: serializationVersion,
expectedVersion: readers.DefaultSerializationVersion,
}
var (
@ -291,23 +302,34 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
// Needs to be a function so we can switch the serialization version as
// needed.
getLayout := func(serVersion uint32) fs.Directory {
getLayout := func(
t *testing.T,
serVersion readers.SerializationVersion,
) fs.Directory {
format := readers.SerializationFormat{Version: serVersion}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader([]byte(noErrFileData))))
require.NoError(t, err, clues.ToCore(err))
r2, err := readers.NewVersionedBackupReader(
format,
errReader.ToReader())
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(noErrFileName),
nil),
r: newBackupStreamReader(
serVersion,
io.NopCloser(bytes.NewReader([]byte(noErrFileData)))),
r: r1,
},
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(errFileName),
nil),
r: newBackupStreamReader(
serVersion,
errReader.ToReader()),
r: r2,
},
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
@ -330,7 +352,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
table := []struct {
name string
inputName string
inputSerializationVersion uint32
inputSerializationVersion readers.SerializationVersion
expectedData []byte
lookupErr assert.ErrorAssertionFunc
readErr assert.ErrorAssertionFunc
@ -339,7 +361,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
{
name: "FileFound_NoError",
inputName: noErrFileName,
inputSerializationVersion: serializationVersion,
inputSerializationVersion: readers.DefaultSerializationVersion,
expectedData: []byte(noErrFileData),
lookupErr: assert.NoError,
readErr: assert.NoError,
@ -347,21 +369,20 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
{
name: "FileFound_ReadError",
inputName: errFileName,
inputSerializationVersion: serializationVersion,
inputSerializationVersion: readers.DefaultSerializationVersion,
lookupErr: assert.NoError,
readErr: assert.Error,
},
{
name: "FileFound_VersionError",
inputName: noErrFileName,
inputSerializationVersion: serializationVersion + 1,
lookupErr: assert.NoError,
readErr: assert.Error,
inputSerializationVersion: readers.DefaultSerializationVersion + 1,
lookupErr: assert.Error,
},
{
name: "FileNotFound",
inputName: "foo",
inputSerializationVersion: serializationVersion + 1,
inputSerializationVersion: readers.DefaultSerializationVersion + 1,
lookupErr: assert.Error,
notFoundErr: true,
},
@ -373,14 +394,14 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
ctx, flush := tester.NewContext(t)
defer flush()
root := getLayout(test.inputSerializationVersion)
root := getLayout(t, test.inputSerializationVersion)
c := &i64counter{}
col := &kopiaDataCollection{
path: pth,
dir: root,
counter: c,
expectedVersion: serializationVersion,
expectedVersion: readers.DefaultSerializationVersion,
}
s, err := col.FetchItemByName(ctx, test.inputName)

View File

@ -16,12 +16,11 @@ func filesystemStorage(
repoOpts repository.Options,
s storage.Storage,
) (blob.Storage, error) {
cfg, err := s.StorageConfig()
fsCfg, err := s.ToFilesystemConfig()
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
}
fsCfg := cfg.(*storage.FilesystemConfig)
opts := filesystem.Options{
Path: fsCfg.Path,
}

View File

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
"github.com/alcionai/corso/src/internal/tester"
@ -150,20 +151,27 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
require.NoError(suite.T(), err, clues.ToCore(err))
// Needs to be a function so the readers get refreshed each time.
layouts := []func() fs.Directory{
layouts := []func(t *testing.T) fs.Directory{
// Has the following;
// - file1: data[0]
// - errOpen: (error opening file)
func() fs.Directory {
func(t *testing.T) fs.Directory {
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData1)))
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileName1),
nil),
r: newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData1))),
size: int64(len(fileData1) + versionSize),
r: r1,
size: int64(len(fileData1) + readers.VersionFormatSize),
},
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
@ -178,34 +186,47 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
// - file1: data[1]
// - file2: data[0]
// - errOpen: data[2]
func() fs.Directory {
func(t *testing.T) fs.Directory {
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData2)))
require.NoError(t, err, clues.ToCore(err))
r2, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData1)))
require.NoError(t, err, clues.ToCore(err))
r3, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData3)))
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileName1),
nil),
r: newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData2))),
size: int64(len(fileData2) + versionSize),
r: r1,
size: int64(len(fileData2) + readers.VersionFormatSize),
},
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileName2),
nil),
r: newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData1))),
size: int64(len(fileData1) + versionSize),
r: r2,
size: int64(len(fileData1) + readers.VersionFormatSize),
},
&mockFile{
StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileOpenErrName),
nil),
r: newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData3))),
size: int64(len(fileData3) + versionSize),
r: r3,
size: int64(len(fileData3) + readers.VersionFormatSize),
},
})
},
@ -257,9 +278,9 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
for i, layout := range layouts {
col := &kopiaDataCollection{
path: pth,
dir: layout(),
dir: layout(t),
counter: c,
expectedVersion: serializationVersion,
expectedVersion: readers.DefaultSerializationVersion,
}
err := dc.addCollection(colPaths[i], col)

View File

@ -29,7 +29,7 @@ type fooModel struct {
//revive:disable-next-line:context-as-argument
func getModelStore(t *testing.T, ctx context.Context) *ModelStore {
c, err := openKopiaRepo(t, ctx)
c, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err))
return &ModelStore{c: c, modelVersion: globalModelVersion}
@ -856,7 +856,7 @@ func openConnAndModelStore(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
) (*conn, *ModelStore) {
st := storeTD.NewPrefixedS3Storage(t)
st := storeTD.NewFilesystemStorage(t)
c := NewConn(st)
err := c.Initialize(ctx, repository.Options{}, repository.Retention{})

View File

@ -20,13 +20,11 @@ func s3BlobStorage(
repoOpts repository.Options,
s storage.Storage,
) (blob.Storage, error) {
sc, err := s.StorageConfig()
cfg, err := s.ToS3Config()
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
}
cfg := sc.(*storage.S3Config)
endpoint := defaultS3Endpoint
if len(cfg.Endpoint) > 0 {
endpoint = cfg.Endpoint

View File

@ -1,19 +1,14 @@
package kopia
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"errors"
"io"
"os"
"runtime/trace"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/alcionai/clues"
"github.com/kopia/kopia/fs"
@ -37,101 +32,6 @@ import (
const maxInflateTraversalDepth = 500
var versionSize = int(unsafe.Sizeof(serializationVersion))
func newBackupStreamReader(version uint32, reader io.ReadCloser) *backupStreamReader {
buf := make([]byte, versionSize)
binary.BigEndian.PutUint32(buf, version)
bufReader := io.NopCloser(bytes.NewReader(buf))
return &backupStreamReader{
readers: []io.ReadCloser{bufReader, reader},
combined: io.NopCloser(io.MultiReader(bufReader, reader)),
}
}
// backupStreamReader is a wrapper around the io.Reader that other Corso
// components return when backing up information. It injects a version number at
// the start of the data stream. Future versions of Corso may not need this if
// they use more complex serialization logic as serialization/version injection
// will be handled by other components.
type backupStreamReader struct {
readers []io.ReadCloser
combined io.ReadCloser
}
func (rw *backupStreamReader) Read(p []byte) (n int, err error) {
if rw.combined == nil {
return 0, os.ErrClosed
}
return rw.combined.Read(p)
}
func (rw *backupStreamReader) Close() error {
if rw.combined == nil {
return nil
}
rw.combined = nil
var errs *clues.Err
for _, r := range rw.readers {
err := r.Close()
if err != nil {
errs = clues.Stack(clues.Wrap(err, "closing reader"), errs)
}
}
return errs.OrNil()
}
// restoreStreamReader is a wrapper around the io.Reader that kopia returns when
// reading data from an item. It examines and strips off the version number of
// the restored data. Future versions of Corso may not need this if they use
// more complex serialization logic as version checking/deserialization will be
// handled by other components. A reader that returns a version error is no
// longer valid and should not be used once the version error is returned.
type restoreStreamReader struct {
io.ReadCloser
expectedVersion uint32
readVersion bool
}
func (rw *restoreStreamReader) checkVersion() error {
versionBuf := make([]byte, versionSize)
for newlyRead := 0; newlyRead < versionSize; {
n, err := rw.ReadCloser.Read(versionBuf[newlyRead:])
if err != nil {
return clues.Wrap(err, "reading data format version")
}
newlyRead += n
}
version := binary.BigEndian.Uint32(versionBuf)
if version != rw.expectedVersion {
return clues.New("unexpected data format").With("read_version", version)
}
return nil
}
func (rw *restoreStreamReader) Read(p []byte) (n int, err error) {
if !rw.readVersion {
rw.readVersion = true
if err := rw.checkVersion(); err != nil {
return 0, err
}
}
return rw.ReadCloser.Read(p)
}
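
Both deleted wrappers move to internal/common/readers. A minimal round-trip sketch of the replacement API, using only names that appear elsewhere in this diff:

func roundTrip(payload []byte) ([]byte, error) {
	format := readers.SerializationFormat{
		Version: readers.DefaultSerializationVersion,
	}

	// Backup side: prepend the format header to the raw payload.
	br, err := readers.NewVersionedBackupReader(
		format,
		io.NopCloser(bytes.NewReader(payload)))
	if err != nil {
		return nil, clues.Wrap(err, "building backup reader")
	}

	// Restore side: consume and validate the header before exposing data.
	rr, err := readers.NewVersionedRestoreReader(br)
	if err != nil {
		return nil, clues.Wrap(err, "reading format header")
	}

	// The header is available without disturbing the payload.
	if rr.Format().DelInFlight {
		return nil, clues.New("unexpected deleted-in-flight marker")
	}

	return io.ReadAll(rr)
}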
type itemDetails struct {
infoer data.ItemInfo
repoPath path.Path
@ -436,7 +336,7 @@ func collectionEntries(
entry := virtualfs.StreamingFileWithModTimeFromReader(
encodedName,
modTime,
newBackupStreamReader(serializationVersion, e.ToReader()))
e.ToReader())
err = ctr(ctx, entry)
if err != nil {

View File

@ -14,7 +14,6 @@ import (
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
@ -124,12 +123,6 @@ func expectFileData(
return
}
// Need to wrap with a restore stream reader to remove the version.
r = &restoreStreamReader{
ReadCloser: io.NopCloser(r),
expectedVersion: serializationVersion,
}
got, err := io.ReadAll(r)
if !assert.NoError(t, err, "reading data in file", name, clues.ToCore(err)) {
return
@ -226,135 +219,6 @@ func getDirEntriesForEntry(
// ---------------
// unit tests
// ---------------
type limitedRangeReader struct {
readLen int
io.ReadCloser
}
func (lrr *limitedRangeReader) Read(p []byte) (int, error) {
if len(p) == 0 {
// Not well specified behavior, defer to underlying reader.
return lrr.ReadCloser.Read(p)
}
toRead := lrr.readLen
if len(p) < toRead {
toRead = len(p)
}
return lrr.ReadCloser.Read(p[:toRead])
}
type VersionReadersUnitSuite struct {
tester.Suite
}
func TestVersionReadersUnitSuite(t *testing.T) {
suite.Run(t, &VersionReadersUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *VersionReadersUnitSuite) TestWriteAndRead() {
inputData := []byte("This is some data for the reader to test with")
table := []struct {
name string
readVersion uint32
writeVersion uint32
check assert.ErrorAssertionFunc
}{
{
name: "SameVersionSucceeds",
readVersion: 42,
writeVersion: 42,
check: assert.NoError,
},
{
name: "DifferentVersionsFail",
readVersion: 7,
writeVersion: 42,
check: assert.Error,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
baseReader := bytes.NewReader(inputData)
reversible := &restoreStreamReader{
expectedVersion: test.readVersion,
ReadCloser: newBackupStreamReader(
test.writeVersion,
io.NopCloser(baseReader)),
}
defer reversible.Close()
allData, err := io.ReadAll(reversible)
test.check(t, err, clues.ToCore(err))
if err != nil {
return
}
assert.Equal(t, inputData, allData)
})
}
}
func readAllInParts(
t *testing.T,
partLen int,
reader io.ReadCloser,
) ([]byte, int) {
res := []byte{}
read := 0
tmp := make([]byte, partLen)
for {
n, err := reader.Read(tmp)
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err, clues.ToCore(err))
read += n
res = append(res, tmp[:n]...)
}
return res, read
}
func (suite *VersionReadersUnitSuite) TestWriteHandlesShortReads() {
t := suite.T()
inputData := []byte("This is some data for the reader to test with")
version := uint32(42)
baseReader := bytes.NewReader(inputData)
versioner := newBackupStreamReader(version, io.NopCloser(baseReader))
expectedToWrite := len(inputData) + int(versionSize)
// "Write" all the data.
versionedData, writtenLen := readAllInParts(t, 1, versioner)
assert.Equal(t, expectedToWrite, writtenLen)
// Read all of the data back.
baseReader = bytes.NewReader(versionedData)
reader := &restoreStreamReader{
expectedVersion: version,
// Be adversarial and only allow reads of length 1 from the byte reader.
ReadCloser: &limitedRangeReader{
readLen: 1,
ReadCloser: io.NopCloser(baseReader),
},
}
readData, readLen := readAllInParts(t, 1, reader)
// This reports the bytes read and returned to the user, excluding the version
// that is stripped off at the start.
assert.Equal(t, len(inputData), readLen)
assert.Equal(t, inputData, readData)
}
type CorsoProgressUnitSuite struct {
tester.Suite
targetFilePath path.Path
@ -2420,9 +2284,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
encodeElements(inboxFileName1)[0],
time.Time{},
// No version wrapper needed; versions are no longer injected at this layer.
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(inboxFileData1v2)))),
io.NopCloser(bytes.NewReader(inboxFileData1v2))),
}),
}),
virtualfs.NewStaticDirectory(
@ -2582,9 +2444,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(inboxFileName1)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(inboxFileData1)))),
io.NopCloser(bytes.NewReader(inboxFileData1))),
}),
}),
virtualfs.NewStaticDirectory(
@ -2596,9 +2456,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(contactsFileName1)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(contactsFileData1)))),
io.NopCloser(bytes.NewReader(contactsFileData1))),
}),
}),
})
@ -2817,15 +2675,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName5)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData5)))),
io.NopCloser(bytes.NewReader(fileData5))),
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName6)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData6)))),
io.NopCloser(bytes.NewReader(fileData6))),
})
counters[folderID3] = count
@ -2835,15 +2689,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName3)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData3)))),
io.NopCloser(bytes.NewReader(fileData3))),
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName4)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData4)))),
io.NopCloser(bytes.NewReader(fileData4))),
folder,
})
counters[folderID2] = count
@ -2859,15 +2709,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName1)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData1)))),
io.NopCloser(bytes.NewReader(fileData1))),
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName2)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData2)))),
io.NopCloser(bytes.NewReader(fileData2))),
folder,
folder4,
})
@ -2879,15 +2725,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName7)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData7)))),
io.NopCloser(bytes.NewReader(fileData7))),
virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName8)[0],
time.Time{},
newBackupStreamReader(
serializationVersion,
io.NopCloser(bytes.NewReader(fileData8)))),
io.NopCloser(bytes.NewReader(fileData8))),
})
counters[folderID5] = count

View File

@ -18,6 +18,7 @@ import (
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics"
"github.com/alcionai/corso/src/internal/observe"
@ -36,8 +37,6 @@ const (
// possibly corresponding to who is making the backup.
corsoHost = "corso-host"
corsoUser = "corso"
serializationVersion uint32 = 1
)
// common manifest tags
@ -447,7 +446,7 @@ func loadDirsAndItems(
dir: dir,
items: dirItems.items,
counter: bcounter,
expectedVersion: serializationVersion,
expectedVersion: readers.DefaultSerializationVersion,
}
if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {

View File

@ -184,7 +184,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_FirstRun_NoChanges() {
ctx, flush := tester.NewContext(t)
defer flush()
k, err := openKopiaRepo(t, ctx)
k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err))
w := &Wrapper{k}
@ -204,7 +204,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_NoForce_Fails
ctx, flush := tester.NewContext(t)
defer flush()
k, err := openKopiaRepo(t, ctx)
k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err))
w := &Wrapper{k}
@ -241,7 +241,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_Force_Succeed
ctx, flush := tester.NewContext(t)
defer flush()
k, err := openKopiaRepo(t, ctx)
k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err))
w := &Wrapper{k}
@ -754,7 +754,7 @@ func (suite *KopiaIntegrationSuite) SetupTest() {
t := suite.T()
suite.ctx, suite.flush = tester.NewContext(t)
c, err := openKopiaRepo(t, suite.ctx)
c, err := openLocalKopiaRepo(t, suite.ctx)
require.NoError(t, err, clues.ToCore(err))
suite.w = &Wrapper{c}
@ -1245,7 +1245,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
ctx, flush := tester.NewContext(t)
defer flush()
k, err := openKopiaRepo(t, ctx)
k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err))
err = k.Compression(ctx, "s2-default")
@ -1268,7 +1268,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
ctx,
[]identity.Reasoner{r},
nil,
[]data.BackupCollection{dc1, dc2},
[]data.BackupCollection{
dataMock.NewVersionedBackupCollection(t, dc1),
dataMock.NewVersionedBackupCollection(t, dc2),
},
nil,
nil,
true,
@ -1556,7 +1559,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
//nolint:forbidigo
suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls)
c, err := openKopiaRepo(t, suite.ctx)
c, err := openLocalKopiaRepo(t, suite.ctx)
require.NoError(t, err, clues.ToCore(err))
suite.w = &Wrapper{c}
@ -1577,12 +1580,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
})
}
collections = append(collections, collection)
collections = append(
collections,
dataMock.NewVersionedBackupCollection(t, collection))
}
r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
stats, deets, _, err := suite.w.ConsumeBackupCollections(
// Other tests check basic things about deets so not doing that again here.
stats, _, _, err := suite.w.ConsumeBackupCollections(
suite.ctx,
[]identity.Reasoner{r},
nil,
@ -1597,8 +1603,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
require.Equal(t, stats.TotalDirectoryCount, expectedDirs)
require.Equal(t, stats.IgnoredErrorCount, 0)
require.False(t, stats.Incomplete)
// 6 file and 2 folder entries.
assert.Len(t, deets.Details().Entries, expectedFiles+2)
suite.snapshotID = manifest.ID(stats.SnapshotID)
}
@ -1629,7 +1633,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludePrefix bool
expectedCachedItems int
expectedUncachedItems int
cols func() []data.BackupCollection
cols func(t *testing.T) []data.BackupCollection
backupIDCheck require.ValueAssertionFunc
restoreCheck assert.ErrorAssertionFunc
}{
@ -1638,7 +1642,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludeItem: true,
expectedCachedItems: len(suite.filesByPath) - 1,
expectedUncachedItems: 0,
cols: func() []data.BackupCollection {
cols: func(t *testing.T) []data.BackupCollection {
return nil
},
backupIDCheck: require.NotEmpty,
@ -1650,7 +1654,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludePrefix: true,
expectedCachedItems: len(suite.filesByPath) - 1,
expectedUncachedItems: 0,
cols: func() []data.BackupCollection {
cols: func(t *testing.T) []data.BackupCollection {
return nil
},
backupIDCheck: require.NotEmpty,
@ -1661,7 +1665,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
// No snapshot should be made since there were no changes.
expectedCachedItems: 0,
expectedUncachedItems: 0,
cols: func() []data.BackupCollection {
cols: func(t *testing.T) []data.BackupCollection {
return nil
},
// Backup doesn't run.
@ -1671,7 +1675,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
name: "NoExcludeItemWithChanges",
expectedCachedItems: len(suite.filesByPath),
expectedUncachedItems: 1,
cols: func() []data.BackupCollection {
cols: func(t *testing.T) []data.BackupCollection {
c := exchMock.NewCollection(
suite.testPath1,
suite.testPath1,
@ -1679,7 +1683,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
c.ColState = data.NotMovedState
c.PrevPath = suite.testPath1
return []data.BackupCollection{c}
return []data.BackupCollection{
dataMock.NewVersionedBackupCollection(t, c),
}
},
backupIDCheck: require.NotEmpty,
restoreCheck: assert.NoError,
@ -1717,7 +1723,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
Manifest: man,
Reasons: []identity.Reasoner{r},
}),
test.cols(),
test.cols(t),
excluded,
nil,
true,

View File

@ -100,7 +100,7 @@ func (ctrl *Controller) ProduceBackupCollections(
}
case path.GroupsService:
colls, ssmb, canUsePreviousBackup, err = groups.ProduceBackupCollections(
colls, ssmb, err = groups.ProduceBackupCollections(
ctx,
bpc,
ctrl.AC,
@ -111,6 +111,10 @@ func (ctrl *Controller) ProduceBackupCollections(
return nil, nil, false, err
}
// canUsePreviousBackup can always be returned as true for groups since
// we return a tombstone collection if the metadata read fails.
canUsePreviousBackup = true
default:
return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
}

View File

@ -11,6 +11,9 @@ import (
"github.com/stretchr/testify/suite"
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/m365/service/exchange"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/internal/m365/service/sharepoint"
@ -458,9 +461,8 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
for item := range collection.Items(ctx, fault.New(true)) {
t.Log("File: " + item.ID())
bs, err := io.ReadAll(item.ToReader())
_, err := io.ReadAll(item.ToReader())
require.NoError(t, err, clues.ToCore(err))
t.Log(string(bs))
}
}
}
@ -575,3 +577,123 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
assert.NotZero(t, status.Successes)
t.Log(status.String())
}
func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_InvalidMetadata() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
groupID = tconfig.M365GroupID(t)
ctrl = newController(ctx, t, path.GroupsService)
groupIDs = []string{groupID}
)
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewGroupsBackup(groupIDs)
sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
sel.SetDiscreteOwnerIDName(id, name)
site, err := suite.connector.AC.Groups().GetRootSite(ctx, groupID)
require.NoError(t, err, clues.ToCore(err))
pth, err := path.Build(
suite.tenantID,
groupID,
path.GroupsService,
path.LibrariesCategory,
true,
odConsts.SitesPathDir,
ptr.Val(site.GetId()))
require.NoError(t, err, clues.ToCore(err))
mmc := []data.RestoreCollection{
mock.Collection{
Path: pth,
ItemData: []data.Item{
&mock.Item{
ItemID: "previouspath",
Reader: io.NopCloser(bytes.NewReader([]byte("invalid"))),
},
},
},
}
bpc := inject.BackupProducerConfig{
LastBackupVersion: version.NoBackup,
Options: control.DefaultOptions(),
ProtectedResource: inMock.NewProvider(id, name),
Selector: sel.Selector,
MetadataCollections: mmc,
}
collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
ctx,
bpc,
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup")
// No excludes yet as this isn't an incremental backup.
assert.True(t, excludes.Empty())
// We don't know the exact count of drives this will produce,
// but it should be more than one.
assert.Greater(t, len(collections), 1)
p, err := path.BuildMetadata(
suite.tenantID,
groupID,
path.GroupsService,
path.LibrariesCategory,
false)
require.NoError(t, err, clues.ToCore(err))
p, err = p.Append(false, odConsts.SitesPathDir)
require.NoError(t, err, clues.ToCore(err))
foundSitesMetadata := false
foundRootTombstone := false
sp, err := path.BuildPrefix(
suite.tenantID,
groupID,
path.GroupsService,
path.LibrariesCategory)
require.NoError(t, err, clues.ToCore(err))
sp, err = sp.Append(false, odConsts.SitesPathDir, ptr.Val(site.GetId()))
require.NoError(t, err, clues.ToCore(err))
for _, coll := range collections {
if coll.State() == data.DeletedState {
if coll.PreviousPath() != nil && coll.PreviousPath().String() == sp.String() {
foundRootTombstone = true
}
continue
}
sitesMetadataCollection := coll.FullPath().String() == p.String()
for object := range coll.Items(ctx, fault.New(true)) {
if object.ID() == "previouspath" && sitesMetadataCollection {
foundSitesMetadata = true
}
buf := &bytes.Buffer{}
_, err := buf.ReadFrom(object.ToReader())
assert.NoError(t, err, "reading item", clues.ToCore(err))
}
}
assert.True(t, foundSitesMetadata, "missing sites metadata")
assert.True(t, foundRootTombstone, "missing root tombstone")
status := ctrl.Wait()
assert.NotZero(t, status.Successes)
t.Log(status.String())
}

View File

@ -33,11 +33,7 @@ const (
MaxOneNoteFileSize = 2 * 1024 * 1024 * 1024
)
var (
_ data.BackupCollection = &Collection{}
_ data.Item = &metadata.Item{}
_ data.ItemModTime = &metadata.Item{}
)
var _ data.BackupCollection = &Collection{}
// Collection represents a set of OneDrive objects retrieved from M365
type Collection struct {
@ -588,14 +584,25 @@ func (oc *Collection) streamDriveItem(
return progReader, nil
})
oc.data <- &metadata.Item{
ItemID: metaFileName + metaSuffix,
Data: metaReader,
storeItem, err := data.NewUnindexedPrefetchedItem(
metaReader,
metaFileName+metaSuffix,
// Metadata files should always use the latest time, since a
// permissions change does not update the item's mod time.
Mod: time.Now(),
time.Now())
if err != nil {
errs.AddRecoverable(ctx, clues.Stack(err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation))
return
}
// We wrap the reader with a lazy reader so that the progress bar is only
// initialized if the file is read. Since we're not actually lazily reading
// data, just use the eager item implementation.
oc.data <- storeItem
// Item read successfully, add to collection
if isFile {
atomic.AddInt64(&stats.itemsRead, 1)

View File

@ -19,6 +19,7 @@ import (
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata"
@ -256,7 +257,7 @@ func (suite *CollectionUnitSuite) TestCollection() {
mt := readItem.(data.ItemModTime)
assert.Equal(t, now, mt.ModTime())
readData, err := io.ReadAll(readItem.ToReader())
rr, err := readers.NewVersionedRestoreReader(readItem.ToReader())
test.expectErr(t, err)
if err != nil {
@ -267,13 +268,25 @@ func (suite *CollectionUnitSuite) TestCollection() {
return
}
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
readData, err := io.ReadAll(rr)
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, stubItemContent, readData)
readItemMeta := readItems[1]
assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID())
rr, err = readers.NewVersionedRestoreReader(readItemMeta.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
readMeta := metadata.Metadata{}
err = json.NewDecoder(readItemMeta.ToReader()).Decode(&readMeta)
err = json.NewDecoder(rr).Decode(&readMeta)
require.NoError(t, err, clues.ToCore(err))
metaTD.AssertMetadataEqual(t, stubMeta, readMeta)
@ -485,12 +498,18 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime()
for _, i := range readItems {
if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) {
content, err := io.ReadAll(i.ToReader())
rr, err := readers.NewVersionedRestoreReader(i.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
content, err := io.ReadAll(rr)
require.NoError(t, err, clues.ToCore(err))
require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content))
im, ok := i.(data.ItemModTime)
require.Equal(t, ok, true, "modtime interface")
require.True(t, ok, "modtime interface")
require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time")
}
}

View File

@ -135,11 +135,6 @@ func deserializeMetadata(
continue
}
if err == nil {
// Successful decode.
continue
}
// This is conservative, but report an error if any of the items in any
// of the deserialized maps have duplicate drive IDs or there's some
// other problem deserializing things. This will cause the entire backup
// to fail if we see any of these cases. We can make the logic for
// deciding when to continue vs. when to fail less strict in the future
// if needed.
// when to fail less strict in the future if needed.
if err != nil {
return nil, nil, false, clues.Stack(err).WithClues(ictx)
errs.Fail(clues.Stack(err).WithClues(ictx))
return map[string]string{}, map[string]map[string]string{}, false, nil
}
}
}
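
The caller-side contract after this change, as exercised by the BadFormat and DriveAlreadyFound cases below; a hedged sketch:

deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols)
// For malformed metadata err is nil; the decode failure lands on the
// operation's fault bus instead. The empty maps plus
// canUsePreviousBackup == false force a full enumeration downstream.
require.NoError(t, err)
assert.False(t, canUsePreviousBackup)
assert.Empty(t, deltas)
assert.Empty(t, paths)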

View File

@ -17,6 +17,7 @@ import (
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
"github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
"github.com/alcionai/corso/src/internal/m365/graph"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
@ -984,7 +985,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
{
// Bad formats are logged and skipped. They don't add entries to the
// maps or cause an error to be returned.
name: "BadFormat",
name: "BadFormat",
expectedDeltas: map[string]string{},
expectedPaths: map[string]map[string]string{},
cols: []func() []graph.MetadataCollectionEntry{
func() []graph.MetadataCollectionEntry {
return []graph.MetadataCollectionEntry{
@ -995,7 +998,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
},
},
canUsePreviousBackup: false,
errCheck: assert.Error,
errCheck: assert.NoError,
},
{
// Unexpected files are logged and skipped. They don't cause an error to
@ -1060,10 +1063,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
}
},
},
expectedDeltas: nil,
expectedPaths: nil,
expectedDeltas: map[string]string{},
expectedPaths: map[string]map[string]string{},
canUsePreviousBackup: false,
errCheck: assert.Error,
errCheck: assert.NoError,
},
{
name: "DriveAlreadyFound_Deltas",
@ -1090,10 +1093,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
}
},
},
expectedDeltas: nil,
expectedPaths: nil,
expectedDeltas: map[string]string{},
expectedPaths: map[string]map[string]string{},
canUsePreviousBackup: false,
errCheck: assert.Error,
errCheck: assert.NoError,
},
}
@ -1121,7 +1124,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
func(*support.ControllerOperationStatus) {})
require.NoError(t, err, clues.ToCore(err))
cols = append(cols, data.NoFetchRestoreCollection{Collection: mc})
cols = append(cols, dataMock.NewUnversionedRestoreCollection(
t,
data.NoFetchRestoreCollection{Collection: mc}))
}
deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols)
@ -2294,7 +2299,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
func(*support.ControllerOperationStatus) {})
assert.NoError(t, err, "creating metadata collection", clues.ToCore(err))
prevMetadata := []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: mc}}
prevMetadata := []data.RestoreCollection{
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: mc}),
}
errs := fault.New(true)
delList := prefixmatcher.NewStringSetBuilder()
@ -2321,7 +2328,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
deltas, paths, _, err := deserializeMetadata(
ctx,
[]data.RestoreCollection{
data.NoFetchRestoreCollection{Collection: baseCol},
dataMock.NewUnversionedRestoreCollection(
t,
data.NoFetchRestoreCollection{Collection: baseCol}),
})
if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
continue

View File

@ -1,7 +1,6 @@
package metadata
import (
"io"
"time"
)
@ -41,17 +40,3 @@ type Metadata struct {
Permissions []Permission `json:"permissions,omitempty"`
LinkShares []LinkShare `json:"linkShares,omitempty"`
}
type Item struct {
ItemID string
Data io.ReadCloser
Mod time.Time
}
// Deleted implements an interface function. However, OneDrive items are marked
// as deleted by adding them to the exclude list so this can always return
// false.
func (i *Item) Deleted() bool { return false }
func (i *Item) ID() string { return i.ItemID }
func (i *Item) ToReader() io.ReadCloser { return i.Data }
func (i *Item) ModTime() time.Time { return i.Mod }
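
With metadata.Item gone, drive metadata flows through data.NewUnindexedPrefetchedItem (see the collection.go hunk above), so metadata streams now carry the version header as well; a hedged sketch of decoding one back in a test:

rr, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.False(t, rr.Format().DelInFlight)

// The metadata JSON follows immediately after the header.
meta := metadata.Metadata{}
err = json.NewDecoder(rr).Decode(&meta)
require.NoError(t, err, clues.ToCore(err))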

View File

@ -15,7 +15,9 @@ import (
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/operations/inject"
@ -322,7 +324,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
require.NoError(t, err, clues.ToCore(err))
cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
data.NoFetchRestoreCollection{Collection: coll},
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: coll}),
})
test.expectError(t, err, clues.ToCore(err))
@ -591,7 +593,7 @@ func (suite *BackupIntgSuite) TestDelta() {
require.NotNil(t, metadata, "collections contains a metadata collection")
cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
data.NoFetchRestoreCollection{Collection: metadata},
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: metadata}),
})
require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup")
@ -666,7 +668,12 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() {
for stream := range streamChannel {
buf := &bytes.Buffer{}
read, err := buf.ReadFrom(stream.ToReader())
rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
read, err := buf.ReadFrom(rr)
assert.NoError(t, err, clues.ToCore(err))
assert.NotZero(t, read)
@ -744,7 +751,13 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() {
for stream := range edc.Items(ctx, fault.New(true)) {
buf := &bytes.Buffer{}
read, err := buf.ReadFrom(stream.ToReader())
rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
read, err := buf.ReadFrom(rr)
assert.NoError(t, err, clues.ToCore(err))
assert.NotZero(t, read)
@ -878,7 +891,12 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() {
for item := range edc.Items(ctx, fault.New(true)) {
buf := &bytes.Buffer{}
read, err := buf.ReadFrom(item.ToReader())
rr, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
read, err := buf.ReadFrom(rr)
assert.NoError(t, err, clues.ToCore(err))
assert.NotZero(t, read)
@ -1198,7 +1216,9 @@ func checkMetadata(
) {
catPaths, _, err := ParseMetadataCollections(
ctx,
[]data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}})
[]data.RestoreCollection{
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: c}),
})
if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) {
return
}

View File

@ -278,10 +278,21 @@ func (col *prefetchCollection) streamItems(
return
}
stream <- data.NewPrefetchedItem(
item, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(itemData)),
id,
details.ItemInfo{Exchange: info})
if err != nil {
el.AddRecoverable(
ctx,
clues.Stack(err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation))
return
}
stream <- item
atomic.AddInt64(&success, 1)
atomic.AddInt64(&totalBytes, info.Size)

View File

@ -17,6 +17,7 @@ import (
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/collection/exchange/mock"
"github.com/alcionai/corso/src/internal/m365/graph"
@ -55,13 +56,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
suite.Run(test.name, func() {
t := suite.T()
ed := data.NewPrefetchedItem(
ed, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(test.readData)),
"itemID",
details.ItemInfo{})
require.NoError(t, err, clues.ToCore(err))
r, err := readers.NewVersionedRestoreReader(ed.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
buf := &bytes.Buffer{}
_, err := buf.ReadFrom(ed.ToReader())
_, err = buf.ReadFrom(r)
assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
assert.Equal(t, test.readData, buf.Bytes(), "read data")
assert.Equal(t, "itemID", ed.ID(), "item ID")
@ -493,11 +501,11 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() {
time.Now(),
fault.New(true))
_, err := li.(data.ItemInfo).Info()
_, err := li.Info()
assert.Error(suite.T(), err, "Info without reading data should error")
}
func (suite *CollectionUnitSuite) TestLazyItem() {
func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() {
var (
parentPath = "inbox/private/silly cats"
now = time.Now()
@ -505,44 +513,19 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
table := []struct {
name string
modTime time.Time
getErr error
serializeErr error
expectModTime time.Time
expectReadErrType error
dataCheck assert.ValueAssertionFunc
expectInfoErr bool
expectInfoErrType error
}{
{
name: "ReturnsEmptyReaderOnDeletedInFlight",
modTime: now,
getErr: graph.ErrDeletedInFlight,
dataCheck: assert.Empty,
expectInfoErr: true,
expectInfoErrType: data.ErrNotFound,
},
{
name: "ReturnsValidReaderAndInfo",
modTime: now,
dataCheck: assert.NotEmpty,
expectModTime: now,
},
{
name: "ReturnsErrorOnGenericGetError",
modTime: now,
getErr: assert.AnError,
expectReadErrType: assert.AnError,
dataCheck: assert.Empty,
expectInfoErr: true,
},
{
name: "ReturnsErrorOnGenericSerializeError",
modTime: now,
serializeErr: assert.AnError,
expectReadErrType: assert.AnError,
dataCheck: assert.Empty,
expectInfoErr: true,
},
}
@ -575,47 +558,128 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
userID: "userID",
itemID: "itemID",
getter: getter,
modTime: test.modTime,
modTime: now,
immutableIDs: false,
parentPath: parentPath,
},
"itemID",
test.modTime,
now,
fault.New(true))
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
assert.Equal(
t,
test.modTime,
li.(data.ItemModTime).ModTime(),
"item mod time")
assert.Equal(t, now, li.ModTime(), "item mod time")
readData, err := io.ReadAll(li.ToReader())
if test.expectReadErrType == nil {
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
} else {
assert.ErrorIs(t, err, test.expectReadErrType, "read error")
}
test.dataCheck(t, readData, "read item data")
info, err := li.(data.ItemInfo).Info()
// Didn't expect an error getting info, it should be valid.
if !test.expectInfoErr {
assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
assert.Equal(t, parentPath, info.Exchange.ParentPath)
assert.Equal(t, test.expectModTime, info.Modified())
return
}
_, err := readers.NewVersionedRestoreReader(li.ToReader())
assert.ErrorIs(t, err, test.expectReadErrType)
// Should get some form of error when trying to get info.
_, err = li.Info()
assert.Error(t, err, "Info()")
if test.expectInfoErrType != nil {
assert.ErrorIs(t, err, test.expectInfoErrType, "Info() error")
}
})
}
}
func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlight() {
var (
t = suite.T()
parentPath = "inbox/private/silly cats"
now = time.Now()
)
ctx, flush := tester.NewContext(t)
defer flush()
getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight}
li := data.NewLazyItem(
ctx,
&lazyItemGetter{
userID: "userID",
itemID: "itemID",
getter: getter,
modTime: now,
immutableIDs: false,
parentPath: parentPath,
},
"itemID",
now,
fault.New(true))
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
assert.Equal(
t,
now,
li.ModTime(),
"item mod time")
r, err := readers.NewVersionedRestoreReader(li.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.True(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
assert.Empty(t, readData, "read item data")
_, err = li.Info()
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
}
func (suite *CollectionUnitSuite) TestLazyItem() {
var (
t = suite.T()
parentPath = "inbox/private/silly cats"
now = time.Now()
)
ctx, flush := tester.NewContext(t)
defer flush()
// Exact data type doesn't really matter.
testData := models.NewMessage()
testData.SetSubject(ptr.To("hello world"))
getter := &mock.ItemGetSerialize{GetData: testData}
li := data.NewLazyItem(
ctx,
&lazyItemGetter{
userID: "userID",
itemID: "itemID",
getter: getter,
modTime: now,
immutableIDs: false,
parentPath: parentPath,
},
"itemID",
now,
fault.New(true))
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
assert.Equal(
t,
now,
li.ModTime(),
"item mod time")
r, err := readers.NewVersionedRestoreReader(li.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
assert.NotEmpty(t, readData, "read item data")
info, err := li.Info()
assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
assert.Equal(t, parentPath, info.Exchange.ParentPath)
assert.Equal(t, now, info.Modified())
}

View File

@ -67,6 +67,15 @@ func (bh channelsBackupHandler) canonicalPath(
false)
}
func (bh channelsBackupHandler) PathPrefix(tenantID string) (path.Path, error) {
return path.Build(
tenantID,
bh.protectedResource,
path.GroupsService,
path.ChannelMessagesCategory,
false)
}
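
A hedged usage sketch: PathPrefix gives callers a stable root for the protected resource's channel-messages subtree (the exact segment layout is whatever path.Build produces for these inputs):

prefix, err := bh.PathPrefix("tenantID")
if err != nil {
	return clues.Wrap(err, "building channel messages path prefix")
}

_ = prefix // seeds subtree lookups for incremental backups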
func (bh channelsBackupHandler) GetChannelMessage(
ctx context.Context,
teamID, channelID, itemID string,

View File

@ -150,27 +150,47 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
parentFolderID,
id)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
el.AddRecoverable(
ctx,
clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
return
}
if err := writer.WriteObjectValue("", item); err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
el.AddRecoverable(
ctx,
clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
return
}
itemData, err := writer.GetSerializedContent()
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "serializing channel message"))
el.AddRecoverable(
ctx,
clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation))
return
}
info.ParentPath = col.LocationPath().String()
col.stream <- data.NewPrefetchedItem(
storeItem, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(itemData)),
id,
details.ItemInfo{Groups: info})
if err != nil {
el.AddRecoverable(
ctx,
clues.Stack(err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation))
return
}
col.stream <- storeItem
atomic.AddInt64(&streamedItems, 1)
atomic.AddInt64(&totalBytes, info.Size)

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/collection/groups/mock"
"github.com/alcionai/corso/src/internal/m365/support"
@ -48,13 +49,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
suite.Run(test.name, func() {
t := suite.T()
ed := data.NewPrefetchedItem(
ed, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(test.readData)),
"itemID",
details.ItemInfo{})
require.NoError(t, err, clues.ToCore(err))
r, err := readers.NewVersionedRestoreReader(ed.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
buf := &bytes.Buffer{}
_, err := buf.ReadFrom(ed.ToReader())
_, err = buf.ReadFrom(r)
assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
assert.Equal(t, test.readData, buf.Bytes(), "read data")
assert.Equal(t, "itemID", ed.ID(), "item ID")

View File

@ -4,7 +4,6 @@ import (
"bytes"
"context"
"io"
"time"
"github.com/alcionai/clues"
"github.com/microsoft/kiota-abstractions-go/serialization"
@ -40,12 +39,7 @@ const (
Pages DataCategory = 2
)
var (
_ data.BackupCollection = &Collection{}
_ data.Item = &Item{}
_ data.ItemInfo = &Item{}
_ data.ItemModTime = &Item{}
)
var _ data.BackupCollection = &Collection{}
// Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
// by the oneDrive.Collection, as the calls for populating the Collection are identical.
@ -120,43 +114,6 @@ func (sc *Collection) Items(
return sc.data
}
type Item struct {
id string
data io.ReadCloser
info *details.SharePointInfo
modTime time.Time
// true if the item was marked by graph as deleted.
deleted bool
}
func NewItem(name string, d io.ReadCloser) *Item {
return &Item{
id: name,
data: d,
}
}
func (sd *Item) ID() string {
return sd.id
}
func (sd *Item) ToReader() io.ReadCloser {
return sd.data
}
func (sd Item) Deleted() bool {
return sd.deleted
}
func (sd *Item) Info() (details.ItemInfo, error) {
return details.ItemInfo{SharePoint: sd.info}, nil
}
func (sd *Item) ModTime() time.Time {
return sd.modTime
}
func (sc *Collection) finishPopulation(
ctx context.Context,
metrics support.CollectionMetrics,
@ -251,21 +208,20 @@ func (sc *Collection) retrieveLists(
size := int64(len(byteArray))
if size > 0 {
t := time.Now()
if t1 := lst.GetLastModifiedDateTime(); t1 != nil {
t = *t1
}
metrics.Bytes += size
metrics.Successes++
sc.data <- &Item{
id: ptr.Val(lst.GetId()),
data: io.NopCloser(bytes.NewReader(byteArray)),
info: ListToSPInfo(lst, size),
modTime: t,
item, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
ptr.Val(lst.GetId()),
details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
if err != nil {
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
continue
}
sc.data <- item
progress <- struct{}{}
}
}
@ -322,13 +278,17 @@ func (sc *Collection) retrievePages(
if size > 0 {
metrics.Bytes += size
metrics.Successes++
sc.data <- &Item{
id: ptr.Val(pg.GetId()),
data: io.NopCloser(bytes.NewReader(byteArray)),
info: pageToSPInfo(pg, root, size),
modTime: ptr.OrNow(pg.GetLastModifiedDateTime()),
item, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
ptr.Val(pg.GetId()),
details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})
if err != nil {
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
continue
}
sc.data <- item
progress <- struct{}{}
}
}

View File

@ -19,6 +19,7 @@ import (
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
@ -58,21 +59,6 @@ func TestSharePointCollectionSuite(t *testing.T) {
})
}
func (suite *SharePointCollectionSuite) TestCollection_Item_Read() {
t := suite.T()
m := []byte("test message")
name := "aFile"
sc := &Item{
id: name,
data: io.NopCloser(bytes.NewReader(m)),
}
readData, err := io.ReadAll(sc.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, name, sc.id)
assert.Equal(t, readData, m)
}
// TestCollection_Items tests basic functionality for creating a
// SharePoint collection and using the data stream channel.
func (suite *SharePointCollectionSuite) TestCollection_Items() {
@ -88,7 +74,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
name, itemName string
scope selectors.SharePointScope
getDir func(t *testing.T) path.Path
getItem func(t *testing.T, itemName string) *Item
getItem func(t *testing.T, itemName string) data.Item
}{
{
name: "List",
@ -106,7 +92,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
return dir
},
getItem: func(t *testing.T, name string) *Item {
getItem: func(t *testing.T, name string) data.Item {
ow := kioser.NewJsonSerializationWriter()
listing := spMock.ListDefault(name)
listing.SetDisplayName(&name)
@ -117,11 +103,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
byteArray, err := ow.GetSerializedContent()
require.NoError(t, err, clues.ToCore(err))
data := &Item{
id: name,
data: io.NopCloser(bytes.NewReader(byteArray)),
info: ListToSPInfo(listing, int64(len(byteArray))),
}
data, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
name,
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
require.NoError(t, err, clues.ToCore(err))
return data
},
@ -142,16 +128,16 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
return dir
},
getItem: func(t *testing.T, itemName string) *Item {
getItem: func(t *testing.T, itemName string) data.Item {
byteArray := spMock.Page(itemName)
page, err := betaAPI.CreatePageFromBytes(byteArray)
require.NoError(t, err, clues.ToCore(err))
data := &Item{
id: itemName,
data: io.NopCloser(bytes.NewReader(byteArray)),
info: betaAPI.PageInfo(page, int64(len(byteArray))),
}
data, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
itemName,
details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))})
require.NoError(t, err, clues.ToCore(err))
return data
},
@ -210,11 +196,11 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
byteArray, err := service.Serialize(listing)
require.NoError(t, err, clues.ToCore(err))
listData := &Item{
id: testName,
data: io.NopCloser(bytes.NewReader(byteArray)),
info: ListToSPInfo(listing, int64(len(byteArray))),
}
listData, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
testName,
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
require.NoError(t, err, clues.ToCore(err))
destName := testdata.DefaultRestoreConfig("").Location

View File

@ -79,20 +79,29 @@ func NewController(
return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
}
rc := resource.UnknownResource
var rCli *resourceClient
switch pst {
case path.ExchangeService, path.OneDriveService:
rc = resource.Users
case path.GroupsService:
rc = resource.Groups
case path.SharePointService:
rc = resource.Sites
}
// No failure for an unknown service.
// In that case we create a controller that doesn't attempt to look up any resource
// data. This avoids unnecessary service calls when the end user is running
// repo init and connect commands via the CLI. All other callers are expected
// to pass in a known service, or else should expect downstream failures.
if pst != path.UnknownService {
rc := resource.UnknownResource
rCli, err := getResourceClient(rc, ac)
if err != nil {
return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
switch pst {
case path.ExchangeService, path.OneDriveService:
rc = resource.Users
case path.GroupsService:
rc = resource.Groups
case path.SharePointService:
rc = resource.Sites
}
rCli, err = getResourceClient(rc, ac)
if err != nil {
return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
}
}
ctrl := Controller{
@ -110,6 +119,10 @@ func NewController(
return &ctrl, nil
}
func (ctrl *Controller) VerifyAccess(ctx context.Context) error {
return ctrl.AC.Access().GetToken(ctx)
}
// ---------------------------------------------------------------------------
// Processing Status
// ---------------------------------------------------------------------------
@ -195,7 +208,7 @@ func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, er
case resource.Groups:
return &resourceClient{enum: rc, getter: ac.Groups()}, nil
default:
return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc)
return nil, clues.New("unrecognized owner resource type").With("resource_enum", rc)
}
}
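
With this change NewController only builds a resource client when the service is known, so CLI flows like repo init and connect make no Graph lookups. A small sketch of that guard (types and names here are placeholders, not the real controller):

package main

import (
    "errors"
    "fmt"
)

type service int

const (
    unknownService service = iota
    exchangeService
)

type resourceClient struct{ kind string }

// newResourceClient skips construction entirely for an unknown service so
// commands that only touch local repo state make no remote calls.
func newResourceClient(svc service) (*resourceClient, error) {
    if svc == unknownService {
        return nil, nil // lazy: callers needing a client must pass a known service
    }

    switch svc {
    case exchangeService:
        return &resourceClient{kind: "users"}, nil
    default:
        return nil, errors.New("unrecognized owner resource type")
    }
}

func main() {
    rc, err := newResourceClient(unknownService)
    fmt.Println(rc, err) // <nil> <nil>: no remote lookups performed
}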

View File

@ -861,7 +861,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
},
},
{
name: "MultipleContactsSingleFolder",
name: "MultipleContactsInRestoreFolder",
service: path.ExchangeService,
collections: []stub.ColInfo{
{
@ -887,49 +887,77 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
},
},
},
{
name: "MultipleContactsMultipleFolders",
service: path.ExchangeService,
collections: []stub.ColInfo{
{
PathElements: []string{"Work"},
Category: path.ContactsCategory,
Items: []stub.ItemInfo{
{
Name: "someencodeditemID",
Data: exchMock.ContactBytes("Ghimley"),
LookupKey: "Ghimley",
},
{
Name: "someencodeditemID2",
Data: exchMock.ContactBytes("Irgot"),
LookupKey: "Irgot",
},
{
Name: "someencodeditemID3",
Data: exchMock.ContactBytes("Jannes"),
LookupKey: "Jannes",
},
},
},
{
PathElements: []string{"Personal"},
Category: path.ContactsCategory,
Items: []stub.ItemInfo{
{
Name: "someencodeditemID4",
Data: exchMock.ContactBytes("Argon"),
LookupKey: "Argon",
},
{
Name: "someencodeditemID5",
Data: exchMock.ContactBytes("Bernard"),
LookupKey: "Bernard",
},
},
},
},
},
// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
//{
// name: "MultipleContactsSingleFolder",
// service: path.ExchangeService,
// collections: []stub.ColInfo{
// {
// PathElements: []string{"Contacts"},
// Category: path.ContactsCategory,
// Items: []stub.ItemInfo{
// {
// Name: "someencodeditemID",
// Data: exchMock.ContactBytes("Ghimley"),
// LookupKey: "Ghimley",
// },
// {
// Name: "someencodeditemID2",
// Data: exchMock.ContactBytes("Irgot"),
// LookupKey: "Irgot",
// },
// {
// Name: "someencodeditemID3",
// Data: exchMock.ContactBytes("Jannes"),
// LookupKey: "Jannes",
// },
// },
// },
// },
//},
//{
// name: "MultipleContactsMultipleFolders",
// service: path.ExchangeService,
// collections: []stub.ColInfo{
// {
// PathElements: []string{"Work"},
// Category: path.ContactsCategory,
// Items: []stub.ItemInfo{
// {
// Name: "someencodeditemID",
// Data: exchMock.ContactBytes("Ghimley"),
// LookupKey: "Ghimley",
// },
// {
// Name: "someencodeditemID2",
// Data: exchMock.ContactBytes("Irgot"),
// LookupKey: "Irgot",
// },
// {
// Name: "someencodeditemID3",
// Data: exchMock.ContactBytes("Jannes"),
// LookupKey: "Jannes",
// },
// },
// },
// {
// PathElements: []string{"Personal"},
// Category: path.ContactsCategory,
// Items: []stub.ItemInfo{
// {
// Name: "someencodeditemID4",
// Data: exchMock.ContactBytes("Argon"),
// LookupKey: "Argon",
// },
// {
// Name: "someencodeditemID5",
// Data: exchMock.ContactBytes("Bernard"),
// LookupKey: "Bernard",
// },
// },
// },
// },
//},
// {
// name: "MultipleEventsSingleCalendar",
// service: path.ExchangeService,
@ -1017,34 +1045,35 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
table := []restoreBackupInfo{
{
name: "Contacts",
service: path.ExchangeService,
collections: []stub.ColInfo{
{
PathElements: []string{"Work"},
Category: path.ContactsCategory,
Items: []stub.ItemInfo{
{
Name: "someencodeditemID",
Data: exchMock.ContactBytes("Ghimley"),
LookupKey: "Ghimley",
},
},
},
{
PathElements: []string{"Personal"},
Category: path.ContactsCategory,
Items: []stub.ItemInfo{
{
Name: "someencodeditemID2",
Data: exchMock.ContactBytes("Irgot"),
LookupKey: "Irgot",
},
},
},
},
},
// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
//{
// name: "Contacts",
// service: path.ExchangeService,
// collections: []stub.ColInfo{
// {
// PathElements: []string{"Work"},
// Category: path.ContactsCategory,
// Items: []stub.ItemInfo{
// {
// Name: "someencodeditemID",
// Data: exchMock.ContactBytes("Ghimley"),
// LookupKey: "Ghimley",
// },
// },
// },
// {
// PathElements: []string{"Personal"},
// Category: path.ContactsCategory,
// Items: []stub.ItemInfo{
// {
// Name: "someencodeditemID2",
// Data: exchMock.ContactBytes("Irgot"),
// LookupKey: "Irgot",
// },
// },
// },
// },
//},
// {
// name: "Events",
// service: path.ExchangeService,

View File

@ -70,6 +70,7 @@ const (
NoSPLicense errorMessage = "Tenant does not have a SPO license"
parameterDeltaTokenNotSupported errorMessage = "Parameter 'DeltaToken' not supported for this request"
usersCannotBeResolved errorMessage = "One or more users could not be resolved"
requestedSiteCouldNotBeFound errorMessage = "Requested site could not be found"
)
const (
@ -259,6 +260,10 @@ func IsErrUsersCannotBeResolved(err error) bool {
return hasErrorCode(err, noResolvedUsers) || hasErrorMessage(err, usersCannotBeResolved)
}
func IsErrSiteNotFound(err error) bool {
return hasErrorMessage(err, requestedSiteCouldNotBeFound)
}
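
IsErrSiteNotFound delegates to hasErrorMessage, and the tests added below check that matching ignores case. hasErrorMessage's implementation isn't visible in this diff, so the following is only a guess at its shape: a case-insensitive substring match on the error text:

package main

import (
    "errors"
    "fmt"
    "strings"
)

const requestedSiteCouldNotBeFound = "Requested site could not be found"

// hasMessage reports whether err's text contains msg, ignoring case,
// mirroring how the graph error matchers tolerate casing differences.
func hasMessage(err error, msg string) bool {
    if err == nil {
        return false
    }
    return strings.Contains(
        strings.ToLower(err.Error()),
        strings.ToLower(msg))
}

func main() {
    err := errors.New("REQUESTED SITE COULD NOT BE FOUND")
    fmt.Println(hasMessage(err, requestedSiteCouldNotBeFound)) // true
    fmt.Println(hasMessage(nil, requestedSiteCouldNotBeFound)) // false
}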
// ---------------------------------------------------------------------------
// error parsers
// ---------------------------------------------------------------------------

View File

@ -628,6 +628,51 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUsersCannotBeResolved() {
}
}
func (suite *GraphErrorsUnitSuite) TestIsErrSiteCouldNotBeFound() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "non-matching oDataErr",
err: odErrMsg("InvalidRequest", "cant resolve sites"),
expect: assert.False,
},
{
name: "matching oDataErr msg",
err: odErrMsg("InvalidRequest", string(requestedSiteCouldNotBeFound)),
expect: assert.True,
},
// The next two tests make sure the checks are case-insensitive.
{
name: "oDataErr uppercase",
err: odErrMsg("InvalidRequest", strings.ToUpper(string(requestedSiteCouldNotBeFound))),
expect: assert.True,
},
{
name: "oDataErr lowercase",
err: odErrMsg("InvalidRequest", strings.ToLower(string(requestedSiteCouldNotBeFound))),
expect: assert.True,
},
}
for _, test := range table {
suite.Run(test.name, func() {
test.expect(suite.T(), IsErrSiteNotFound(test.err))
})
}
}
func (suite *GraphErrorsUnitSuite) TestIsErrCannotOpenFileAttachment() {
table := []struct {
name string

View File

@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"io"
"time"
"github.com/alcionai/clues"
@ -16,7 +17,7 @@ import (
var (
_ data.BackupCollection = &MetadataCollection{}
_ data.Item = &MetadataItem{}
_ data.Item = &metadataItem{}
)
// MetadataCollection is a simple collection that assumes all items to be
@ -24,7 +25,7 @@ var (
// created. This collection has no logic for lazily fetching item data.
type MetadataCollection struct {
fullPath path.Path
items []MetadataItem
items []metadataItem
statusUpdater support.StatusUpdater
}
@ -40,23 +41,34 @@ func NewMetadataEntry(fileName string, mData any) MetadataCollectionEntry {
return MetadataCollectionEntry{fileName, mData}
}
func (mce MetadataCollectionEntry) toMetadataItem() (MetadataItem, error) {
func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) {
if len(mce.fileName) == 0 {
return MetadataItem{}, clues.New("missing metadata filename")
return metadataItem{}, clues.New("missing metadata filename")
}
if mce.data == nil {
return MetadataItem{}, clues.New("missing metadata")
return metadataItem{}, clues.New("missing metadata")
}
buf := &bytes.Buffer{}
encoder := json.NewEncoder(buf)
if err := encoder.Encode(mce.data); err != nil {
return MetadataItem{}, clues.Wrap(err, "serializing metadata")
return metadataItem{}, clues.Wrap(err, "serializing metadata")
}
return NewMetadataItem(mce.fileName, buf.Bytes()), nil
item, err := data.NewUnindexedPrefetchedItem(
io.NopCloser(buf),
mce.fileName,
time.Now())
if err != nil {
return metadataItem{}, clues.Stack(err)
}
return metadataItem{
Item: item,
size: int64(buf.Len()),
}, nil
}
// MakeMetadataCollection creates a metadata collection that has a file
@ -71,7 +83,7 @@ func MakeMetadataCollection(
return nil, nil
}
items := make([]MetadataItem, 0, len(metadata))
items := make([]metadataItem, 0, len(metadata))
for _, md := range metadata {
item, err := md.toMetadataItem()
@ -89,7 +101,7 @@ func MakeMetadataCollection(
func NewMetadataCollection(
p path.Path,
items []MetadataItem,
items []metadataItem,
statusUpdater support.StatusUpdater,
) *MetadataCollection {
return &MetadataCollection{
@ -148,7 +160,7 @@ func (md MetadataCollection) Items(
defer close(res)
for _, item := range md.items {
totalBytes += int64(len(item.data))
totalBytes += item.size
res <- item
}
}()
@ -156,36 +168,7 @@ func (md MetadataCollection) Items(
return res
}
// MetadataItem is an in-memory data.Item implementation. MetadataItem does
// not implement additional interfaces like data.ItemInfo, so it should only
// be used for items with a small amount of content that don't need to be added
// to backup details.
//
// Currently the expected use-cases for this struct are storing metadata for a
// backup, like delta tokens or a mapping of container IDs to container paths.
type MetadataItem struct {
// uuid is an ID that can be used to refer to the item.
uuid string
// data is a buffer of data that the item refers to.
data []byte
}
func NewMetadataItem(uuid string, itemData []byte) MetadataItem {
return MetadataItem{
uuid: uuid,
data: itemData,
}
}
func (mi MetadataItem) ID() string {
return mi.uuid
}
// TODO(ashmrtn): Fill in once we know how to handle this.
func (mi MetadataItem) Deleted() bool {
return false
}
func (mi MetadataItem) ToReader() io.ReadCloser {
return io.NopCloser(bytes.NewReader(mi.data))
type metadataItem struct {
data.Item
size int64
}
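
metadataItem now decorates a data.Item via interface embedding: every data.Item method is promoted automatically, and the struct adds only the size the collection needs for byte accounting. A stdlib sketch of the embedding idiom (the Item interface below is a stand-in, not corso's):

package main

import (
    "fmt"
    "io"
    "strings"
)

// Item is a stand-in for the interface being decorated.
type Item interface {
    ID() string
    ToReader() io.ReadCloser
}

type baseItem struct{ id, body string }

func (b baseItem) ID() string              { return b.id }
func (b baseItem) ToReader() io.ReadCloser { return io.NopCloser(strings.NewReader(b.body)) }

// sizedItem embeds the interface: ID and ToReader are promoted
// automatically, and only the extra field is added.
type sizedItem struct {
    Item
    size int64
}

func main() {
    si := sizedItem{Item: baseItem{id: "deltaTokens", body: "{}"}, size: 2}
    fmt.Println(si.ID(), si.size) // promoted method + new field
}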

View File

@ -1,9 +1,11 @@
package graph
import (
"bytes"
"encoding/json"
"io"
"testing"
"time"
"github.com/alcionai/clues"
"github.com/google/uuid"
@ -11,6 +13,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/fault"
@ -63,10 +67,21 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
len(itemData),
"Requires same number of items and data")
items := []MetadataItem{}
items := []metadataItem{}
for i := 0; i < len(itemNames); i++ {
items = append(items, NewMetadataItem(itemNames[i], itemData[i]))
item, err := data.NewUnindexedPrefetchedItem(
io.NopCloser(bytes.NewReader(itemData[i])),
itemNames[i],
time.Time{})
require.NoError(t, err, clues.ToCore(err))
items = append(
items,
metadataItem{
Item: item,
size: int64(len(itemData[i])),
})
}
p, err := path.Build(
@ -92,7 +107,13 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
for s := range c.Items(ctx, fault.New(true)) {
gotNames = append(gotNames, s.ID())
buf, err := io.ReadAll(s.ToReader())
rr, err := readers.NewVersionedRestoreReader(s.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
buf, err := io.ReadAll(rr)
if !assert.NoError(t, err, clues.ToCore(err)) {
continue
}
@ -193,11 +214,17 @@ func (suite *MetadataCollectionUnitSuite) TestMakeMetadataCollection() {
for item := range col.Items(ctx, fault.New(true)) {
assert.Equal(t, test.metadata.fileName, item.ID())
rr, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
gotMap := map[string]string{}
decoder := json.NewDecoder(item.ToReader())
decoder := json.NewDecoder(rr)
itemCount++
err := decoder.Decode(&gotMap)
err = decoder.Decode(&gotMap)
if !assert.NoError(t, err, clues.ToCore(err)) {
continue
}

View File

@ -16,6 +16,7 @@ import (
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub"
@ -573,7 +574,12 @@ func compareExchangeEmail(
expected map[string][]byte,
item data.Item,
) {
itemData, err := io.ReadAll(item.ToReader())
rr := versionedReadWrapper(t, item.ToReader())
if rr == nil {
return
}
itemData, err := io.ReadAll(rr)
if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
return
}
@ -600,7 +606,12 @@ func compareExchangeContact(
expected map[string][]byte,
item data.Item,
) {
itemData, err := io.ReadAll(item.ToReader())
rr := versionedReadWrapper(t, item.ToReader())
if rr == nil {
return
}
itemData, err := io.ReadAll(rr)
if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
return
}
@ -628,7 +639,12 @@ func compareExchangeEvent(
expected map[string][]byte,
item data.Item,
) {
itemData, err := io.ReadAll(item.ToReader())
rr := versionedReadWrapper(t, item.ToReader())
if rr == nil {
return
}
itemData, err := io.ReadAll(rr)
if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
return
}
@ -718,7 +734,12 @@ func compareDriveItem(
return false
}
buf, err := io.ReadAll(item.ToReader())
rr := versionedReadWrapper(t, item.ToReader())
if rr == nil {
return true
}
buf, err := io.ReadAll(rr)
if !assert.NoError(t, err, clues.ToCore(err)) {
return true
}
@ -751,10 +772,6 @@ func compareDriveItem(
}
if isMeta {
var itemType *metadata.Item
assert.IsType(t, itemType, item)
var (
itemMeta metadata.Metadata
expectedMeta metadata.Metadata
@ -854,6 +871,29 @@ func compareDriveItem(
return true
}
// versionedReadWrapper strips out the version format header and checks that it
// meets the current standard for all service types. If it doesn't meet the
// standard, returns nil. Else returns the versioned restore reader.
func versionedReadWrapper(
t *testing.T,
reader io.ReadCloser,
) io.ReadCloser {
rr, err := readers.NewVersionedRestoreReader(reader)
if !assert.NoError(t, err, clues.ToCore(err)) {
return nil
}
if !assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) {
return nil
}
if !assert.False(t, rr.Format().DelInFlight) {
return nil
}
return rr
}
// compareItem compares the data returned by backup with the expected data.
// Returns true if a comparison was done, else false. The bool return is mostly
// used to exclude OneDrive permissions for the root right now.
@ -923,30 +963,9 @@ func checkHasCollections(
continue
}
fp := g.FullPath()
loc := g.(data.LocationPather).LocationPath()
if fp.Service() == path.OneDriveService ||
(fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) {
dp, err := path.ToDrivePath(fp)
if !assert.NoError(t, err, clues.ToCore(err)) {
continue
}
loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...)
}
p, err := loc.ToDataLayerPath(
fp.Tenant(),
fp.ProtectedResource(),
fp.Service(),
fp.Category(),
false)
if !assert.NoError(t, err, clues.ToCore(err)) {
continue
}
gotNames = append(gotNames, p.String())
gotNames = append(gotNames, loc.String())
}
assert.ElementsMatch(t, expectedNames, gotNames, "returned collections")
@ -967,14 +986,18 @@ func checkCollections(
for _, returned := range got {
var (
hasItems bool
service = returned.FullPath().Service()
category = returned.FullPath().Category()
expectedColData = expected[returned.FullPath().String()]
folders = returned.FullPath().Elements()
rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location
expectedColDataByLoc map[string][]byte
hasItems bool
service = returned.FullPath().Service()
category = returned.FullPath().Category()
folders = returned.FullPath().Elements()
rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location
)
if p, ok := returned.(data.LocationPather); ok {
expectedColDataByLoc = expected[p.LocationPath().String()]
}
// Need to iterate through all items even if we don't expect to find a match
// because otherwise we'll deadlock waiting for the status. Unexpected or
// missing collection paths will be reported by checkHasCollections.
@ -994,14 +1017,14 @@ func checkCollections(
hasItems = true
gotItems++
if expectedColData == nil {
if expectedColDataByLoc == nil {
continue
}
if !compareItem(
t,
returned.FullPath(),
expectedColData,
expectedColDataByLoc,
service,
category,
item,

View File

@ -84,6 +84,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
rcc,
ctrl.AC,
ctrl.backupDriveIDNames,
ctrl.backupSiteIDWebURL,
dcs,
deets,
errs,

View File

@ -22,6 +22,7 @@ import (
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/identity"
"github.com/alcionai/corso/src/pkg/backup/metadata"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
@ -35,19 +36,18 @@ func ProduceBackupCollections(
creds account.M365Config,
su support.StatusUpdater,
errs *fault.Bus,
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
b, err := bpc.Selector.ToGroupsBackup()
if err != nil {
return nil, nil, false, clues.Wrap(err, "groupsDataCollection: parsing selector")
return nil, nil, clues.Wrap(err, "groupsDataCollection: parsing selector")
}
var (
el = errs.Local()
collections = []data.BackupCollection{}
categories = map[path.CategoryType]struct{}{}
ssmb = prefixmatcher.NewStringSetBuilder()
canUsePreviousBackup bool
sitesPreviousPaths = map[string]string{}
el = errs.Local()
collections = []data.BackupCollection{}
categories = map[path.CategoryType]struct{}{}
ssmb = prefixmatcher.NewStringSetBuilder()
sitesPreviousPaths = map[string]string{}
)
ctx = clues.Add(
@ -55,9 +55,12 @@ func ProduceBackupCollections(
"group_id", clues.Hide(bpc.ProtectedResource.ID()),
"group_name", clues.Hide(bpc.ProtectedResource.Name()))
group, err := ac.Groups().GetByID(ctx, bpc.ProtectedResource.ID())
group, err := ac.Groups().GetByID(
ctx,
bpc.ProtectedResource.ID(),
api.CallConfig{})
if err != nil {
return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx)
return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx)
}
isTeam := api.IsTeam(ctx, group)
@ -76,12 +79,9 @@ func ProduceBackupCollections(
switch scope.Category().PathType() {
case path.LibrariesCategory:
// TODO(meain): Private channels get a separate SharePoint
// site. We should also back those up and not just the
// default one.
resp, err := ac.Groups().GetRootSite(ctx, bpc.ProtectedResource.ID())
sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs)
if err != nil {
return nil, nil, false, err
return nil, nil, err
}
siteMetadataCollection := map[string][]data.RestoreCollection{}
@ -92,39 +92,47 @@ func ProduceBackupCollections(
siteMetadataCollection[siteID] = append(siteMetadataCollection[siteID], c)
}
pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName()))
sbpc := inject.BackupProducerConfig{
LastBackupVersion: bpc.LastBackupVersion,
Options: bpc.Options,
ProtectedResource: pr,
Selector: bpc.Selector,
MetadataCollections: siteMetadataCollection[ptr.Val(resp.GetId())],
}
for _, s := range sites {
pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetName()))
sbpc := inject.BackupProducerConfig{
LastBackupVersion: bpc.LastBackupVersion,
Options: bpc.Options,
ProtectedResource: pr,
Selector: bpc.Selector,
MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())],
}
bh := drive.NewGroupBackupHandler(
bpc.ProtectedResource.ID(),
ptr.Val(resp.GetId()),
ac.Drives(),
scope)
bh := drive.NewGroupBackupHandler(
bpc.ProtectedResource.ID(),
ptr.Val(s.GetId()),
ac.Drives(),
scope)
cp, err := bh.SitePathPrefix(creds.AzureTenantID)
if err != nil {
return nil, nil, false, clues.Wrap(err, "getting canonical path")
}
sp, err := bh.SitePathPrefix(creds.AzureTenantID)
if err != nil {
return nil, nil, clues.Wrap(err, "getting site path")
}
sitesPreviousPaths[ptr.Val(resp.GetId())] = cp.String()
sitesPreviousPaths[ptr.Val(s.GetId())] = sp.String()
dbcs, canUsePreviousBackup, err = site.CollectLibraries(
ctx,
sbpc,
bh,
creds.AzureTenantID,
ssmb,
su,
errs)
if err != nil {
el.AddRecoverable(ctx, err)
continue
cs, canUsePreviousBackup, err := site.CollectLibraries(
ctx,
sbpc,
bh,
creds.AzureTenantID,
ssmb,
su,
errs)
if err != nil {
el.AddRecoverable(ctx, err)
continue
}
if !canUsePreviousBackup {
dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{}))
}
dbcs = append(dbcs, cs...)
}
case path.ChannelMessagesCategory:
@ -132,10 +140,12 @@ func ProduceBackupCollections(
continue
}
dbcs, canUsePreviousBackup, err = groups.CreateCollections(
bh := groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels())
cs, canUsePreviousBackup, err := groups.CreateCollections(
ctx,
bpc,
groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()),
bh,
creds.AzureTenantID,
scope,
su,
@ -144,6 +154,17 @@ func ProduceBackupCollections(
el.AddRecoverable(ctx, err)
continue
}
if !canUsePreviousBackup {
tp, err := bh.PathPrefix(creds.AzureTenantID)
if err != nil {
return nil, nil, clues.Wrap(err, "getting message path")
}
dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{}))
}
dbcs = append(dbcs, cs...)
}
collections = append(collections, dbcs...)
@ -162,7 +183,7 @@ func ProduceBackupCollections(
su,
errs)
if err != nil {
return nil, nil, false, err
return nil, nil, err
}
collections = append(collections, baseCols...)
@ -175,12 +196,12 @@ func ProduceBackupCollections(
sitesPreviousPaths,
su)
if err != nil {
return nil, nil, false, err
return nil, nil, err
}
collections = append(collections, md)
return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
return collections, ssmb.ToReader(), el.Failure()
}
func getSitesMetadataCollection(

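
Both category branches above now share one recovery shape: when a site or channel can't build on its previous backup, a tombstone collection is appended at its path prefix to invalidate cached state, and the loop moves on to the next sub-resource. A schematic sketch of that per-site loop (the collection and path types are placeholders, not corso's):

package main

import "fmt"

type collection struct {
    path      string
    tombstone bool
}

// collectSites gathers collections per site; a site that can't build on its
// previous backup contributes a tombstone so stale cached state is invalidated.
func collectSites(sites []string, canUsePrevious map[string]bool) []collection {
    var out []collection

    for _, site := range sites {
        prefix := "tenant/" + site

        if !canUsePrevious[site] {
            out = append(out, collection{path: prefix, tombstone: true})
        }

        out = append(out, collection{path: prefix + "/libraries"})
    }

    return out
}

func main() {
    cols := collectSites(
        []string{"siteA", "siteB"},
        map[string]bool{"siteA": true})

    for _, c := range cols {
        fmt.Printf("%s tombstone=%v\n", c.path, c.tombstone)
    }
}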
View File

@ -7,18 +7,15 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/pkg/filters"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
type getByIDer interface {
GetByID(ctx context.Context, identifier string) (models.Groupable, error)
}
func IsServiceEnabled(
ctx context.Context,
gbi getByIDer,
gbi api.GetByIDer[models.Groupable],
resource string,
) (bool, error) {
resp, err := gbi.GetByID(ctx, resource)
resp, err := gbi.GetByID(ctx, resource, api.CallConfig{})
if err != nil {
return false, clues.Wrap(err, "getting group").WithClues(ctx)
}
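
IsServiceEnabled now accepts the shared generic api.GetByIDer[models.Groupable] and threads an api.CallConfig through the call. A compact sketch of that generic-getter shape (names mirror the diff, but CallConfig's fields aren't shown there, so it's left empty):

package main

import (
    "context"
    "fmt"
)

// CallConfig stands in for per-call options; its real fields
// aren't visible in this diff.
type CallConfig struct{}

// GetByIDer is a generic lookup interface: one definition serves
// groups, users, sites, etc. by varying T.
type GetByIDer[T any] interface {
    GetByID(ctx context.Context, id string, cc CallConfig) (T, error)
}

type group struct{ name string }

type mockGroups struct{ g group }

func (m mockGroups) GetByID(ctx context.Context, id string, _ CallConfig) (group, error) {
    return m.g, nil
}

func isEnabled(ctx context.Context, gbi GetByIDer[group], id string) (bool, error) {
    g, err := gbi.GetByID(ctx, id, CallConfig{})
    if err != nil {
        return false, err
    }
    return g.name != "", nil
}

func main() {
    ok, _ := isEnabled(context.Background(), mockGroups{g: group{name: "eng"}}, "id-1")
    fmt.Println(ok) // true
}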

View File

@ -12,6 +12,7 @@ import (
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
type EnabledUnitSuite struct {
@ -22,14 +23,18 @@ func TestEnabledUnitSuite(t *testing.T) {
suite.Run(t, &EnabledUnitSuite{Suite: tester.NewUnitSuite(t)})
}
var _ getByIDer = mockGBI{}
var _ api.GetByIDer[models.Groupable] = mockGBI{}
type mockGBI struct {
group models.Groupable
err error
}
func (m mockGBI) GetByID(ctx context.Context, identifier string) (models.Groupable, error) {
func (m mockGBI) GetByID(
ctx context.Context,
identifier string,
_ api.CallConfig,
) (models.Groupable, error) {
return m.group, m.err
}
@ -56,13 +61,13 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
table := []struct {
name string
mock func(context.Context) getByIDer
mock func(context.Context) api.GetByIDer[models.Groupable]
expect assert.BoolAssertionFunc
expectErr assert.ErrorAssertionFunc
}{
{
name: "ok",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
group: unified,
}
@ -72,7 +77,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
},
{
name: "non-unified group",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
group: nonUnified,
}
@ -82,7 +87,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
},
{
name: "group not found",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
err: graph.Stack(ctx, odErrMsg(string(graph.RequestResourceNotFound), "message")),
}
@ -92,7 +97,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
},
{
name: "arbitrary error",
mock: func(ctx context.Context) getByIDer {
mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
return mockGBI{
err: assert.AnError,
}

Some files were not shown because too many files have changed in this diff.