Merge branch 'main' into updateKopiaPassword

neha_gupta 2023-10-04 12:45:19 +05:30 committed by GitHub
commit 23cbb7ca33
137 changed files with 4888 additions and 3355 deletions


@@ -45,6 +45,9 @@ runs:
       shell: bash
       working-directory: src
       run: |
+        echo "---------------------------"
+        echo Backup ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         set -euo pipefail
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-${{ inputs.service }}-${{inputs.kind }}.log
         ./corso backup create '${{ inputs.service }}' \
@@ -61,6 +64,9 @@ runs:
       shell: bash
       working-directory: src
       run: |
+        echo "---------------------------"
+        echo Restore ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         set -euo pipefail
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
         ./corso restore '${{ inputs.service }}' \
@@ -85,11 +91,14 @@ runs:
         SANITY_TEST_KIND: restore
         SANITY_TEST_FOLDER: ${{ steps.restore.outputs.result }}
         SANITY_TEST_SERVICE: ${{ inputs.service }}
-        TEST_DATA: ${{ inputs.test-folder }}
-        BASE_BACKUP: ${{ inputs.base-backup }}
+        SANITY_TEST_DATA: ${{ inputs.test-folder }}
+        SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
       run: |
+        echo "---------------------------"
+        echo Sanity Test Restore ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
-        ./sanity-test
+        ./sanity-test restore ${{ inputs.service }}

     - name: Export ${{ inputs.service }} ${{ inputs.kind }}
       if: inputs.with-export == true
@@ -97,6 +106,9 @@ runs:
       shell: bash
       working-directory: src
       run: |
+        echo "---------------------------"
+        echo Export ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         set -euo pipefail
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
         ./corso export '${{ inputs.service }}' \
@@ -116,11 +128,14 @@ runs:
         SANITY_TEST_KIND: export
         SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}
         SANITY_TEST_SERVICE: ${{ inputs.service }}
-        TEST_DATA: ${{ inputs.test-folder }}
-        BASE_BACKUP: ${{ inputs.base-backup }}
+        SANITY_TEST_DATA: ${{ inputs.test-folder }}
+        SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
       run: |
+        echo "---------------------------"
+        echo Sanity-Test Export ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
-        ./sanity-test
+        ./sanity-test export ${{ inputs.service }}

     - name: Export archive ${{ inputs.service }} ${{ inputs.kind }}
       if: inputs.with-export == true
@@ -128,6 +143,9 @@ runs:
       shell: bash
       working-directory: src
       run: |
+        echo "---------------------------"
+        echo Export Archive ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         set -euo pipefail
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
         ./corso export '${{ inputs.service }}' \
@@ -150,16 +168,22 @@ runs:
         SANITY_TEST_KIND: export
         SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}-unzipped
         SANITY_TEST_SERVICE: ${{ inputs.service }}
-        TEST_DATA: ${{ inputs.test-folder }}
-        BASE_BACKUP: ${{ inputs.base-backup }}
+        SANITY_TEST_DATA: ${{ inputs.test-folder }}
+        SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
       run: |
+        echo "---------------------------"
+        echo Sanity-Test Export Archive ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
-        ./sanity-test
+        ./sanity-test export ${{ inputs.service }}

     - name: List ${{ inputs.service }} ${{ inputs.kind }}
       shell: bash
       working-directory: src
       run: |
+        echo "---------------------------"
+        echo Backup list ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         set -euo pipefail
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-${{ inputs.service }}-${{inputs.kind }}.log
         ./corso backup list ${{ inputs.service }} \
@@ -178,6 +202,9 @@ runs:
       shell: bash
       working-directory: src
       run: |
+        echo "---------------------------"
+        echo Backup List w/ Backup ${{ inputs.service }} ${{ inputs.kind }}
+        echo "---------------------------"
         set -euo pipefail
         CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-single-${{ inputs.service }}-${{inputs.kind }}.log
         ./corso backup list ${{ inputs.service }} \
@@ -193,7 +220,13 @@ runs:
           exit 1
         fi

-    # Upload the original go test output as an artifact for later review.
+    - if: always()
+      shell: bash
+      run: |
+        echo "---------------------------"
+        echo Logging Results
+        echo "---------------------------"
+
     - name: Upload test log
       if: always()
       uses: actions/upload-artifact@v3
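
A note on the validation steps in this file: the sanity-test env vars TEST_DATA and BASE_BACKUP are renamed with a SANITY_ prefix, and the binary now receives the test kind and service as CLI arguments rather than being invoked bare. A minimal sketch of the resulting invocation, with placeholder values standing in for the workflow expressions:

    # placeholder values; the real step reads these from ${{ inputs.* }}
    export SANITY_TEST_KIND=restore
    export SANITY_TEST_DATA="corso_restore_folder"   # formerly TEST_DATA
    export SANITY_BASE_BACKUP="base-backup-id"       # formerly BASE_BACKUP
    ./sanity-test restore exchange                   # kind and service passed as args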


@@ -31,7 +31,7 @@ runs:
     - name: use url or blank val
       shell: bash
       run: |
-        echo "STEP=${{ github.action || '' }}" >> $GITHUB_ENV
+        echo "STEP=${{ env.trimmed_ref || '' }}" >> $GITHUB_ENV
         echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV
         echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV
         echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV
@@ -51,7 +51,7 @@ runs:
             "type": "section",
             "text": {
               "type": "mrkdwn",
-              "text": "${{ inputs.msg }} :: ${{ env.JOB }} - ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
+              "text": "${{ inputs.msg }}\n${{ env.JOB }} :: ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
             }
           }
         ]


@@ -181,7 +181,7 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: exchange
-          kind: initial
+          kind: first-backup
           backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"'
           restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
@@ -249,7 +249,7 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: onedrive
-          kind: initial
+          kind: first-backup
           backup-args: '--user "${{ env.TEST_USER }}"'
           restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
@@ -305,7 +305,7 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: sharepoint
-          kind: initial
+          kind: first-backup
           backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"'
           restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
@@ -362,12 +362,34 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: groups
-          kind: initial
+          kind: first-backup
           backup-args: '--group "${{ vars.CORSO_M365_TEST_TEAM_ID }}"'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
           log-dir: ${{ env.CORSO_LOG_DIR }}

-      # TODO: incrementals
+      # generate some more entries for the incremental check
+      # - name: Groups - Create new data (for incremental)
+      #   working-directory: ./src/cmd/factory
+      #   run: |
+      #     go run . sharepoint files \
+      #     --site ${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }} \
+      #     --user ${{ env.TEST_USER }} \
+      #     --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
+      #     --tenant ${{ secrets.TENANT_ID }} \
+      #     --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }} \
+      #     --count 4
+
+      # - name: Groups - Incremental backup
+      #   id: groups-incremental
+      #   uses: ./.github/actions/backup-restore-test
+      #   with:
+      #     service: groups
+      #     kind: incremental
+      #     backup-args: '--site "${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }}"'
+      #     restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
+      #     test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
+      #     log-dir: ${{ env.CORSO_LOG_DIR }}
+      #     with-export: true

 ##########################################################################################################################################


@@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
 - Increase Exchange backup performance by lazily fetching data only for items whose content changed.
 - Added `--backups` flag to delete multiple backups in `corso backup delete` command.
+- Backup now includes all sites that belong to a team, not just the root site.

 ## Fixed
 - Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.
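
The `--backups` flag noted in the changelog above takes more than one backup ID in a single `corso backup delete` run. A minimal usage sketch, assuming a comma-separated list (the IDs and the list format are illustrative, not taken from this diff):

    # hypothetical IDs; assumes the flag accepts a comma-separated list
    ./corso backup delete exchange \
        --backups backup-id-1,backup-id-2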


@@ -16,6 +16,8 @@ import (
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/backup"
+	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
@@ -48,12 +50,12 @@ func AddCommands(cmd *cobra.Command) {
 	for _, sc := range subCommandFuncs {
 		subCommand := sc()
-		flags.AddAllProviderFlags(subCommand)
-		flags.AddAllStorageFlags(subCommand)
 		backupC.AddCommand(subCommand)

 		for _, addBackupTo := range serviceCommands {
-			addBackupTo(subCommand)
+			sc := addBackupTo(subCommand)
+			flags.AddAllProviderFlags(sc)
+			flags.AddAllStorageFlags(sc)
 		}
 	}
 }
@@ -163,7 +165,7 @@ func handleDeleteCmd(cmd *cobra.Command, args []string) error {
 // standard set of selector behavior that we want used in the cli
 var defaultSelectorConfig = selectors.Config{OnlyMatchItemNames: true}

-func runBackups(
+func genericCreateCommand(
 	ctx context.Context,
 	r repository.Repositoryer,
 	serviceName string,
@@ -332,6 +334,65 @@ func genericListCommand(
 	return nil
 }

+func genericDetailsCommand(
+	cmd *cobra.Command,
+	backupID string,
+	sel selectors.Selector,
+) (*details.Details, error) {
+	ctx := cmd.Context()
+
+	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
+	if err != nil {
+		return nil, clues.Stack(err)
+	}
+
+	defer utils.CloseRepo(ctx, r)
+
+	return genericDetailsCore(
+		ctx,
+		r,
+		backupID,
+		sel,
+		rdao.Opts)
+}
+
+func genericDetailsCore(
+	ctx context.Context,
+	bg repository.BackupGetter,
+	backupID string,
+	sel selectors.Selector,
+	opts control.Options,
+) (*details.Details, error) {
+	ctx = clues.Add(ctx, "backup_id", backupID)
+
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+
+	d, _, errs := bg.GetBackupDetails(ctx, backupID)
+	// TODO: log/track recoverable errors
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
+			return nil, clues.New("no backup exists with the id " + backupID)
+		}
+
+		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
+	}
+
+	if opts.SkipReduce {
+		return d, nil
+	}
+
+	d, err := sel.Reduce(ctx, d, errs)
+	if err != nil {
+		return nil, clues.Wrap(err, "filtering backup details to selection")
+	}
+
+	return d, nil
+}
+
+// ---------------------------------------------------------------------------
+// helper funcs
+// ---------------------------------------------------------------------------
+
 func ifShow(flag string) bool {
 	return strings.ToLower(strings.TrimSpace(flag)) == "show"
 }


@@ -0,0 +1,68 @@
+package backup
+
+import (
+	"testing"
+
+	"github.com/alcionai/clues"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/cli/utils/testdata"
+	"github.com/alcionai/corso/src/internal/tester"
+	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
+	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/path"
+	"github.com/alcionai/corso/src/pkg/selectors"
+)
+
+type BackupUnitSuite struct {
+	tester.Suite
+}
+
+func TestBackupUnitSuite(t *testing.T) {
+	suite.Run(t, &BackupUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *BackupUnitSuite) TestGenericDetailsCore() {
+	t := suite.T()
+
+	expected := append(
+		append(
+			dtd.GetItemsForVersion(
+				t,
+				path.ExchangeService,
+				path.EmailCategory,
+				0,
+				-1),
+			dtd.GetItemsForVersion(
+				t,
+				path.ExchangeService,
+				path.EventsCategory,
+				0,
+				-1)...),
+		dtd.GetItemsForVersion(
+			t,
+			path.ExchangeService,
+			path.ContactsCategory,
+			0,
+			-1)...)
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	bg := testdata.VersionedBackupGetter{
+		Details: dtd.GetDetailsSetForVersion(t, 0),
+	}
+
+	sel := selectors.NewExchangeBackup([]string{"user-id"})
+	sel.Include(sel.AllData())
+
+	output, err := genericDetailsCore(
+		ctx,
+		bg,
+		"backup-ID",
+		sel.Selector,
+		control.DefaultOptions())
+	assert.NoError(t, err, clues.ToCore(err))
+	assert.ElementsMatch(t, expected, output.Entries)
+}


@@ -1,21 +1,15 @@
 package backup

 import (
-	"context"
-
 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"

 	"github.com/alcionai/corso/src/cli/flags"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
@@ -182,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"Exchange",
@@ -272,74 +266,31 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	return runDetailsExchangeCmd(cmd)
-}
-
-func runDetailsExchangeCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeExchangeOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService)
+	sel := utils.IncludeExchangeRestoreDataSelectors(opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterExchangeRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}
-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsExchangeCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}
-
-	ds.PrintEntries(ctx)

 	return nil
 }

-// runDetailsExchangeCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsExchangeCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.ExchangeOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("No backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeExchangeRestoreDataSelectors(opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterExchangeRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // ------------------------------------------------------------------------------------------------
 // backup delete
 // ------------------------------------------------------------------------------------------------


@@ -55,7 +55,7 @@ func (suite *NoBackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 }

 func (suite *NoBackupExchangeE2ESuite) TestExchangeBackupListCmd_noBackups() {
@@ -109,7 +109,7 @@ func (suite *BackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 }

 func (suite *BackupExchangeE2ESuite) TestExchangeBackupCmd_email() {
@@ -336,7 +336,7 @@ func (suite *PreparedBackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 	suite.backupOps = make(map[path.CategoryType]string)

 	var (
@@ -579,7 +579,7 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)

 	m365UserID := tconfig.M365UserID(t)
 	users := []string{m365UserID}


@@ -1,8 +1,6 @@
 package backup

 import (
-	"bytes"
-	"fmt"
 	"strconv"
 	"testing"
@@ -14,11 +12,9 @@ import (
 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 )
@@ -92,76 +88,46 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
 func (suite *ExchangeUnitSuite) TestBackupCreateFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: createCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	// Test arg parsing for few args
-	args := []string{
-		exchangeServiceCommand,
-		"--" + flags.RunModeFN, flags.RunModeFlagTest,
-		"--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
-		"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),
-		"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
-		"--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,
-		// bool flags
-		"--" + flags.FailFastFN,
-		"--" + flags.DisableIncrementalsFN,
-		"--" + flags.ForceItemDataDownloadFN,
-		"--" + flags.DisableDeltaFN,
-		"--" + flags.EnableImmutableIDFN,
-		"--" + flags.DisableConcurrencyLimiterFN,
-	}
-
-	args = append(args, flagsTD.PreparedProviderFlags()...)
-	args = append(args, flagsTD.PreparedStorageFlags()...)
-
-	cmd.SetArgs(args)
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: createCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
+				"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),
+				"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
+				"--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,
+				// bool flags
+				"--" + flags.FailFastFN,
+				"--" + flags.DisableIncrementalsFN,
+				"--" + flags.ForceItemDataDownloadFN,
+				"--" + flags.DisableDeltaFN,
+				"--" + flags.EnableImmutableIDFN,
+				"--" + flags.DisableConcurrencyLimiterFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	opts := utils.MakeExchangeOpts(cmd)
 	co := utils.Control()

 	assert.ElementsMatch(t, flagsTD.MailboxInput, opts.Users)
-	// no assertion for category data input
 	assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))
 	assert.Equal(t, flagsTD.DeltaPageSize, strconv.Itoa(int(co.DeltaPageSize)))
-	// bool flags
 	assert.Equal(t, control.FailFast, co.FailureHandling)
 	assert.True(t, co.ToggleFeatures.DisableIncrementals)
 	assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
 	assert.True(t, co.ToggleFeatures.DisableDelta)
 	assert.True(t, co.ToggleFeatures.ExchangeImmutableIDs)
 	assert.True(t, co.ToggleFeatures.DisableConcurrencyLimiter)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -169,36 +135,25 @@ func (suite *ExchangeUnitSuite) TestBackupCreateFlags() {
 func (suite *ExchangeUnitSuite) TestBackupListFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: listCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand, []string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedBackupListFlags(),
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: listCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedBackupListFlags(),
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertBackupListFlags(t, cmd)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
@@ -207,41 +162,28 @@ func (suite *ExchangeUnitSuite) TestBackupListFlags() {
 func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: detailsCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-			"--" + flags.SkipReduceFN,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: detailsCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+				"--" + flags.SkipReduceFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	co := utils.Control()

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	assert.True(t, co.SkipReduce)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -249,36 +191,24 @@ func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() {
 func (suite *ExchangeUnitSuite) TestBackupDeleteFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: deleteCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: deleteCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -434,51 +364,3 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupCreateSelectors() {
 		})
 	}
 }
-
-func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.ExchangeOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsExchangeCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadExchangeOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsExchangeCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}


@@ -2,7 +2,6 @@ package backup

 import (
 	"context"
-	"errors"
 	"fmt"

 	"github.com/alcionai/clues"
@@ -14,12 +13,9 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365"
 )
@@ -174,7 +170,7 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"Group",
@@ -225,74 +221,31 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	return runDetailsGroupsCmd(cmd)
-}
-
-func runDetailsGroupsCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeGroupsOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService)
+	sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterGroupsRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}
-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsGroupsCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}
-
-	ds.PrintEntries(ctx)

 	return nil
 }

-// runDetailsGroupsCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsGroupsCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.GroupsOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterGroupsRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // ------------------------------------------------------------------------------------------------
 // backup delete
 // ------------------------------------------------------------------------------------------------


@@ -56,7 +56,7 @@ func (suite *NoBackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 }

 func (suite *NoBackupGroupsE2ESuite) TestGroupsBackupListCmd_noBackups() {
@@ -110,7 +110,7 @@ func (suite *BackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 }

 func (suite *BackupGroupsE2ESuite) TestGroupsBackupCmd_channelMessages() {
@@ -287,7 +287,7 @@ func (suite *PreparedBackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 	suite.backupOps = make(map[path.CategoryType]string)

 	var (
@@ -515,7 +515,7 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)

 	m365GroupID := tconfig.M365GroupID(t)
 	groups := []string{m365GroupID}


@@ -1,7 +1,6 @@
 package backup

 import (
-	"bytes"
 	"strconv"
 	"testing"
@@ -13,6 +12,7 @@ import (
 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
@@ -128,70 +128,38 @@ func (suite *GroupsUnitSuite) TestValidateGroupsBackupCreateFlags() {
 func (suite *GroupsUnitSuite) TestBackupCreateFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: createCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	// Test arg parsing for few args
-	args := []string{
-		groupsServiceCommand,
-		"--" + flags.RunModeFN, flags.RunModeFlagTest,
-		"--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
-		"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),
-		"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
-		// bool flags
-		"--" + flags.FailFastFN,
-		"--" + flags.DisableIncrementalsFN,
-		"--" + flags.ForceItemDataDownloadFN,
-		"--" + flags.DisableDeltaFN,
-	}
-
-	args = append(args, flagsTD.PreparedProviderFlags()...)
-	args = append(args, flagsTD.PreparedStorageFlags()...)
-
-	cmd.SetArgs(args)
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: createCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			groupsServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
+				"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),
+				"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
+				"--" + flags.FailFastFN,
+				"--" + flags.DisableIncrementalsFN,
+				"--" + flags.ForceItemDataDownloadFN,
+				"--" + flags.DisableDeltaFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	opts := utils.MakeGroupsOpts(cmd)
 	co := utils.Control()

 	assert.ElementsMatch(t, flagsTD.GroupsInput, opts.Groups)
-	// no assertion for category data input
 	assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))
-	// bool flags
 	assert.Equal(t, control.FailFast, co.FailureHandling)
 	assert.True(t, co.ToggleFeatures.DisableIncrementals)
 	assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
 	assert.True(t, co.ToggleFeatures.DisableDelta)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -199,37 +167,25 @@ func (suite *GroupsUnitSuite) TestBackupCreateFlags() {
 func (suite *GroupsUnitSuite) TestBackupListFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: listCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedBackupListFlags(),
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: listCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			groupsServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedBackupListFlags(),
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertBackupListFlags(t, cmd)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
@@ -238,41 +194,28 @@ func (suite *GroupsUnitSuite) TestBackupListFlags() {
 func (suite *GroupsUnitSuite) TestBackupDetailsFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: detailsCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-			"--" + flags.SkipReduceFN,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: detailsCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			groupsServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+				"--" + flags.SkipReduceFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	co := utils.Control()

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	assert.True(t, co.SkipReduce)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -280,48 +223,24 @@ func (suite *GroupsUnitSuite) TestBackupDetailsFlags() {
 func (suite *GroupsUnitSuite) TestBackupDeleteFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: deleteCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	// Test arg parsing for few args
-	args := []string{
-		groupsServiceCommand,
-		"--" + flags.RunModeFN, flags.RunModeFlagTest,
-		"--" + flags.BackupFN, flagsTD.BackupInput,
-	}
-
-	args = append(args, flagsTD.PreparedProviderFlags()...)
-	args = append(args, flagsTD.PreparedStorageFlags()...)
-
-	cmd.SetArgs(args)
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: deleteCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			groupsServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }


@@ -21,7 +21,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 	"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
@@ -133,6 +133,7 @@ type dependencies struct {
 func prepM365Test(
 	t *testing.T,
 	ctx context.Context, //revive:disable-line:context-as-argument
+	pst path.ServiceType,
 ) dependencies {
 	var (
 		acct = tconfig.NewM365Account(t)
@@ -140,11 +141,9 @@ func prepM365Test(
 		recorder = strings.Builder{}
 	)

-	sc, err := st.StorageConfig()
+	cfg, err := st.ToS3Config()
 	require.NoError(t, err, clues.ToCore(err))

-	cfg := sc.(*storage.S3Config)
-
 	force := map[string]string{
 		tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
 		tconfig.TestCfgStorageProvider: storage.ProviderS3.String(),
@@ -162,7 +161,9 @@ func prepM365Test(
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = repo.Initialize(ctx, ctrlRepo.Retention{})
+	err = repo.Initialize(ctx, repository.InitConfig{
+		Service: pst,
+	})
 	require.NoError(t, err, clues.ToCore(err))

 	return dependencies{


@@ -1,21 +1,15 @@
 package backup

 import (
-	"context"
-
 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"

 	"github.com/alcionai/corso/src/cli/flags"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
@@ -162,7 +156,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"OneDrive",
@@ -229,74 +223,31 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	return runDetailsOneDriveCmd(cmd)
-}
-
-func runDetailsOneDriveCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeOneDriveOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
+	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}
-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsOneDriveCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}
-
-	ds.PrintEntries(ctx)

 	return nil
 }

-// runDetailsOneDriveCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsOneDriveCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.OneDriveOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // `corso backup delete onedrive [<flag>...]`
 func oneDriveDeleteCmd() *cobra.Command {
 	return &cobra.Command{


@@ -20,6 +20,7 @@ import (
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -48,7 +49,7 @@ func (suite *NoBackupOneDriveE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)
 }

 func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupListCmd_empty() {
@@ -139,7 +140,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)

 	var (
 		m365UserID = tconfig.M365UserID(t)


@@ -1,8 +1,6 @@
 package backup

 import (
-	"bytes"
-	"fmt"
 	"testing"

 	"github.com/alcionai/clues"
@@ -13,11 +11,9 @@ import (
 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 )
@@ -92,48 +88,33 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
 func (suite *OneDriveUnitSuite) TestBackupCreateFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: createCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
-			"--" + flags.FailFastFN,
-			"--" + flags.DisableIncrementalsFN,
-			"--" + flags.ForceItemDataDownloadFN,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: createCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
+				"--" + flags.FailFastFN,
+				"--" + flags.DisableIncrementalsFN,
+				"--" + flags.ForceItemDataDownloadFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	opts := utils.MakeOneDriveOpts(cmd)
 	co := utils.Control()

 	assert.ElementsMatch(t, flagsTD.UsersInput, opts.Users)
-	// no assertion for category data input
-
-	// bool flags
 	assert.Equal(t, control.FailFast, co.FailureHandling)
 	assert.True(t, co.ToggleFeatures.DisableIncrementals)
 	assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -141,37 +122,25 @@ func (suite *OneDriveUnitSuite) TestBackupCreateFlags() {
 func (suite *OneDriveUnitSuite) TestBackupListFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: listCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedBackupListFlags(),
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: listCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedBackupListFlags(),
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertBackupListFlags(t, cmd)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
@@ -180,41 +149,28 @@ func (suite *OneDriveUnitSuite) TestBackupListFlags() {
 func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: detailsCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-			"--" + flags.SkipReduceFN,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: detailsCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+				"--" + flags.SkipReduceFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	co := utils.Control()

+	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	assert.True(t, co.SkipReduce)
-	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -222,36 +178,24 @@ func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() {
 func (suite *OneDriveUnitSuite) TestBackupDeleteFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: deleteCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: deleteCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -279,51 +223,3 @@ func (suite *OneDriveUnitSuite) TestValidateOneDriveBackupCreateFlags() {
 		})
 	}
 }
-
-func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.OneDriveOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsOneDriveCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadOneDriveOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsOneDriveCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}
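
The four flag tests above now share a cliTD.SetUpCmdHasFlags helper. Its real implementation lives in src/cli/testdata and is not part of this diff; the following is a rough sketch of what the call sites imply, with every detail below an inference rather than the actual code:

// SetUpCmdHasFlags (hypothetical reconstruction): build the command tree,
// attach the non-persistent flag sets, apply the prepared args, and execute
// once so the flag values populate for later assertions.
func SetUpCmdHasFlags(
	t *testing.T,
	parent *cobra.Command,
	addCommands func(*cobra.Command) *cobra.Command,
	flagFns []UseCobraCommandFn,
	applyArgs func(*cobra.Command),
) *cobra.Command {
	// persistent flags not added by addCommands
	flags.AddRunModeFlag(parent, true)

	child := addCommands(parent)
	require.NotNil(t, child)

	// non-persistent flags not added by addCommands
	for _, fn := range flagFns {
		fn(child)
	}

	applyArgs(parent)

	parent.SetOut(new(bytes.Buffer)) // drop output
	parent.SetErr(new(bytes.Buffer)) // drop output

	err := parent.Execute()
	require.NoError(t, err, clues.ToCore(err))

	return parent
}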

View File

@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"golang.org/x/exp/slices"
@@ -13,12 +12,9 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365"
 )
@@ -179,7 +175,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"SharePoint",
@@ -303,7 +299,7 @@ func deleteSharePointCmd(cmd *cobra.Command, args []string) error {
 // backup details
 // ------------------------------------------------------------------------------------------------

-// `corso backup details onedrive [<flag>...]`
+// `corso backup details SharePoint [<flag>...]`
 func sharePointDetailsCmd() *cobra.Command {
 	return &cobra.Command{
 		Use: sharePointServiceCommand,
@@ -324,70 +320,27 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsSharePointCmd(cmd)
+}
+
+func runDetailsSharePointCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeSharePointOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService)
+	sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterSharePointRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsSharePointCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}

-	ds.PrintEntries(ctx)
-
 	return nil
 }
-
-// runDetailsSharePointCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsSharePointCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.SharePointOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterSharePointRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
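
The per-service details plumbing now funnels into a shared genericDetailsCommand helper whose definition sits outside this diff. A speculative sketch of its shape, assembled from the removed per-service code and the call sites above; treat the body, and especially the skip-reduce handling, as assumptions:

// genericDetailsCommand (hypothetical body): connect to the repo, fetch the
// backup's details, and reduce them with the caller's selector unless the
// user asked to skip reduction.
func genericDetailsCommand(
	cmd *cobra.Command,
	backupID string,
	sel selectors.Selector,
) (*details.Details, error) {
	ctx := cmd.Context()

	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, sel.PathService())
	if err != nil {
		return nil, err
	}

	defer utils.CloseRepo(ctx, r)

	ctx = clues.Add(ctx, "backup_id", backupID)

	d, _, errs := r.GetBackupDetails(ctx, backupID)
	if errs.Failure() != nil {
		return nil, clues.Wrap(errs.Failure(), "getting backup details")
	}

	if !rdao.Opts.SkipReduce {
		// assumed: the selector reduces the details set as in the old code
		d = sel.Reduce(ctx, d, errs)
	}

	return d, nil
}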

View File

@@ -20,6 +20,7 @@ import (
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/selectors/testdata"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -46,7 +47,7 @@ func (suite *NoBackupSharePointE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.SharePointService)
 }

 func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() {
@@ -103,7 +104,7 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.SharePointService)

 	var (
 		m365SiteID = tconfig.M365SiteID(t)

View File

@@ -1,8 +1,6 @@
 package backup

 import (
-	"bytes"
-	"fmt"
 	"strings"
 	"testing"

@@ -14,12 +12,10 @@ import (
 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
@@ -94,51 +90,36 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
 func (suite *SharePointUnitSuite) TestBackupCreateFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: createCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addSharePointCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		sharePointServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput),
-			"--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput),
-			"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput),
-			"--" + flags.FailFastFN,
-			"--" + flags.DisableIncrementalsFN,
-			"--" + flags.ForceItemDataDownloadFN,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: createCommand},
+		addSharePointCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			sharePointServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput),
+				"--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput),
+				"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput),
+				"--" + flags.FailFastFN,
+				"--" + flags.DisableIncrementalsFN,
+				"--" + flags.ForceItemDataDownloadFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	opts := utils.MakeSharePointOpts(cmd)
 	co := utils.Control()

 	assert.ElementsMatch(t, []string{strings.Join(flagsTD.SiteIDInput, ",")}, opts.SiteID)
 	assert.ElementsMatch(t, flagsTD.WebURLInput, opts.WebURL)
-	// no assertion for category data input
-
-	// bool flags
 	assert.Equal(t, control.FailFast, co.FailureHandling)
 	assert.True(t, co.ToggleFeatures.DisableIncrementals)
 	assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -146,37 +127,25 @@ func (suite *SharePointUnitSuite) TestBackupCreateFlags() {
 func (suite *SharePointUnitSuite) TestBackupListFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: listCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addSharePointCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		sharePointServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedBackupListFlags(),
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: listCommand},
+		addSharePointCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			sharePointServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedBackupListFlags(),
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertBackupListFlags(t, cmd)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
@@ -185,41 +154,28 @@ func (suite *SharePointUnitSuite) TestBackupListFlags() {
 func (suite *SharePointUnitSuite) TestBackupDetailsFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: detailsCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addSharePointCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		sharePointServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-			"--" + flags.SkipReduceFN,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: detailsCommand},
+		addSharePointCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			sharePointServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+				"--" + flags.SkipReduceFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	co := utils.Control()

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	assert.True(t, co.SkipReduce)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -227,36 +183,24 @@ func (suite *SharePointUnitSuite) TestBackupDetailsFlags() {
 func (suite *SharePointUnitSuite) TestBackupDeleteFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: deleteCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addSharePointCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		sharePointServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: deleteCommand},
+		addSharePointCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
+		},
+		flagsTD.WithFlags(
+			sharePointServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -391,51 +335,3 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() {
 		})
 	}
 }
-
-func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.SharePointOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsSharePointCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadSharePointOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsSharePointCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}

View File

@@ -54,7 +54,7 @@ func configureAccount(
 	if matchFromConfig {
 		providerType := vpr.GetString(account.AccountProviderTypeKey)
 		if providerType != account.ProviderM365.String() {
-			return acct, clues.New("unsupported account provider: " + providerType)
+			return acct, clues.New("unsupported account provider: [" + providerType + "]")
 		}

 		if err := mustMatchConfig(vpr, m365Overrides(overrides)); err != nil {

View File

@@ -279,8 +279,7 @@ func getStorageAndAccountWithViper(
 	// possibly read the prior config from a .corso file
 	if readFromFile {
-		err = vpr.ReadInConfig()
-		if err != nil {
+		if err := vpr.ReadInConfig(); err != nil {
 			if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
 				return config, clues.Wrap(err, "reading corso config file: "+vpr.ConfigFileUsed())
 			}

View File

@@ -356,10 +356,9 @@ func (suite *ConfigSuite) TestReadFromFlags() {
 	m365Config, _ := repoDetails.Account.M365Config()

-	sc, err := repoDetails.Storage.StorageConfig()
+	s3Cfg, err := repoDetails.Storage.ToS3Config()
 	require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))

-	s3Cfg := sc.(*storage.S3Config)
 	commonConfig, _ := repoDetails.Storage.CommonConfig()
 	pass := commonConfig.Corso.CorsoPassphrase
@@ -425,17 +424,21 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount() {
 	err = writeRepoConfigWithViper(vpr, s3Cfg, m365, repository.Options{}, "repoid")
 	require.NoError(t, err, "writing repo config", clues.ToCore(err))

+	require.Equal(
+		t,
+		account.ProviderM365.String(),
+		vpr.GetString(account.AccountProviderTypeKey),
+		"viper should have m365 as the account provider")
+
 	err = vpr.ReadInConfig()
 	require.NoError(t, err, "reading repo config", clues.ToCore(err))

 	cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, true, true, nil)
 	require.NoError(t, err, "getting storage and account from config", clues.ToCore(err))

-	sc, err := cfg.Storage.StorageConfig()
+	readS3Cfg, err := cfg.Storage.ToS3Config()
 	require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))

-	readS3Cfg := sc.(*storage.S3Config)
 	assert.Equal(t, readS3Cfg.Bucket, s3Cfg.Bucket)
 	assert.Equal(t, readS3Cfg.Endpoint, s3Cfg.Endpoint)
 	assert.Equal(t, readS3Cfg.Prefix, s3Cfg.Prefix)
@@ -482,11 +485,9 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount_noFileOnlyOverride
 	cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, false, true, overrides)
 	require.NoError(t, err, "getting storage and account from config", clues.ToCore(err))

-	sc, err := cfg.Storage.StorageConfig()
+	readS3Cfg, err := cfg.Storage.ToS3Config()
 	require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err))

-	readS3Cfg := sc.(*storage.S3Config)
 	assert.Equal(t, readS3Cfg.Bucket, bkt)
 	assert.Equal(t, cfg.RepoID, "")
 	assert.Equal(t, readS3Cfg.Endpoint, end)
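
The recurring change across these test hunks swaps the generic StorageConfig() accessor plus a type assertion for a typed ToS3Config() accessor. The accessor's definition is not shown in this diff; a plausible sketch of the pattern, assuming the storage package keeps the generic accessor underneath (names follow the diff, the error text is invented):

// ToS3Config (hypothetical): fetch the generic storage config and perform
// the type assertion once, inside the storage package, so callers get a
// typed *S3Config or a descriptive error instead of a panic-prone cast.
func (s Storage) ToS3Config() (*S3Config, error) {
	sc, err := s.StorageConfig()
	if err != nil {
		return nil, err
	}

	cfg, ok := sc.(*S3Config)
	if !ok {
		return nil, clues.New("storage config is not an s3 config")
	}

	return cfg, nil
}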

View File

@@ -27,11 +27,11 @@ var exportCommands = []func(cmd *cobra.Command) *cobra.Command{
 // AddCommands attaches all `corso export * *` commands to the parent.
 func AddCommands(cmd *cobra.Command) {
 	subCommand := exportCmd()
-	flags.AddAllStorageFlags(subCommand)
 	cmd.AddCommand(subCommand)

 	for _, addExportTo := range exportCommands {
-		addExportTo(subCommand)
+		sc := addExportTo(subCommand)
+		flags.AddAllStorageFlags(sc)
 	}
 }
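
The storage flags move off the shared export parent and onto each service subcommand returned by its add-function. Each exportCommands entry follows cobra's add-and-return idiom; a hedged sketch of the shape (the function and field names below are illustrative, not the repository's actual code):

// addServiceCommands-style function: create the leaf command, attach it to
// the parent, and return it so AddCommands can hang more flags off of it.
func addServiceCommands(parent *cobra.Command) *cobra.Command {
	c := &cobra.Command{Use: "onedrive", RunE: exportOneDriveCmd} // names assumed
	parent.AddCommand(c)

	return c
}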

View File

@@ -1,17 +1,15 @@
 package export

 import (
-	"bytes"
 	"testing"

-	"github.com/alcionai/clues"
 	"github.com/spf13/cobra"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/tester"
 )
@@ -39,55 +37,41 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
 	for _, test := range table {
 		suite.Run(test.name, func() {
 			t := suite.T()

-			cmd := &cobra.Command{Use: test.use}
-
-			// persistent flags not added by addCommands
-			flags.AddRunModeFlag(cmd, true)
-
-			c := addGroupsCommands(cmd)
-			require.NotNil(t, c)
-
-			// non-persistent flags not added by addCommands
-			flags.AddAllProviderFlags(c)
-			flags.AddAllStorageFlags(c)
-
-			cmds := cmd.Commands()
-			require.Len(t, cmds, 1)
-
-			child := cmds[0]
-			assert.Equal(t, test.expectUse, child.Use)
-			assert.Equal(t, test.expectShort, child.Short)
-			tester.AreSameFunc(t, test.expectRunE, child.RunE)
-
-			flagsTD.WithFlags(
-				cmd,
-				groupsServiceCommand,
-				[]string{
-					flagsTD.RestoreDestination,
-					"--" + flags.RunModeFN, flags.RunModeFlagTest,
-					"--" + flags.BackupFN, flagsTD.BackupInput,
-					"--" + flags.FormatFN, flagsTD.FormatType,
-					// bool flags
-					"--" + flags.ArchiveFN,
-				},
-				flagsTD.PreparedProviderFlags(),
-				flagsTD.PreparedStorageFlags())
-
-			cmd.SetOut(new(bytes.Buffer)) // drop output
-			cmd.SetErr(new(bytes.Buffer)) // drop output
-
-			err := cmd.Execute()
-			assert.NoError(t, err, clues.ToCore(err))
+			parent := &cobra.Command{Use: exportCommand}
+
+			cmd := cliTD.SetUpCmdHasFlags(
+				t,
+				parent,
+				addGroupsCommands,
+				[]cliTD.UseCobraCommandFn{
+					flags.AddAllProviderFlags,
+					flags.AddAllStorageFlags,
+				},
+				flagsTD.WithFlags(
+					groupsServiceCommand,
+					[]string{
+						flagsTD.RestoreDestination,
+						"--" + flags.RunModeFN, flags.RunModeFlagTest,
+						"--" + flags.BackupFN, flagsTD.BackupInput,
+						"--" + flags.FormatFN, flagsTD.FormatType,
+						"--" + flags.ArchiveFN,
+					},
+					flagsTD.PreparedProviderFlags(),
+					flagsTD.PreparedStorageFlags()))
+
+			cliTD.CheckCmdChild(
+				t,
+				parent,
+				3,
+				test.expectUse,
+				test.expectShort,
+				test.expectRunE)

 			opts := utils.MakeGroupsOpts(cmd)
-			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

+			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 			assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive)
 			assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format)
 			flagsTD.AssertStorageFlags(t, cmd)
 		})
 	}

View File

@@ -1,17 +1,15 @@
 package export

 import (
-	"bytes"
 	"testing"

-	"github.com/alcionai/clues"
 	"github.com/spf13/cobra"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/tester"
 )
@@ -39,67 +37,55 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
 	for _, test := range table {
 		suite.Run(test.name, func() {
 			t := suite.T()

-			cmd := &cobra.Command{Use: test.use}
-
-			// persistent flags not added by addCommands
-			flags.AddRunModeFlag(cmd, true)
-
-			c := addOneDriveCommands(cmd)
-			require.NotNil(t, c)
-
-			// non-persistent flags not added by addCommands
-			flags.AddAllProviderFlags(c)
-			flags.AddAllStorageFlags(c)
-
-			cmds := cmd.Commands()
-			require.Len(t, cmds, 1)
-
-			child := cmds[0]
-			assert.Equal(t, test.expectUse, child.Use)
-			assert.Equal(t, test.expectShort, child.Short)
-			tester.AreSameFunc(t, test.expectRunE, child.RunE)
-
-			flagsTD.WithFlags(
-				cmd,
-				oneDriveServiceCommand,
-				[]string{
-					flagsTD.RestoreDestination,
-					"--" + flags.RunModeFN, flags.RunModeFlagTest,
-					"--" + flags.BackupFN, flagsTD.BackupInput,
-					"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
-					"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
-					"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
-					"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
-					"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
-					"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
-					"--" + flags.FormatFN, flagsTD.FormatType,
-					// bool flags
-					"--" + flags.ArchiveFN,
-				},
-				flagsTD.PreparedProviderFlags(),
-				flagsTD.PreparedStorageFlags())
-
-			cmd.SetOut(new(bytes.Buffer)) // drop output
-			cmd.SetErr(new(bytes.Buffer)) // drop output
-
-			err := cmd.Execute()
-			assert.NoError(t, err, clues.ToCore(err))
+			parent := &cobra.Command{Use: exportCommand}
+
+			cmd := cliTD.SetUpCmdHasFlags(
+				t,
+				parent,
+				addOneDriveCommands,
+				[]cliTD.UseCobraCommandFn{
+					flags.AddAllProviderFlags,
+					flags.AddAllStorageFlags,
+				},
+				flagsTD.WithFlags(
+					oneDriveServiceCommand,
+					[]string{
+						flagsTD.RestoreDestination,
+						"--" + flags.RunModeFN, flags.RunModeFlagTest,
+						"--" + flags.BackupFN, flagsTD.BackupInput,
+						"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
+						"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
+						"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
+						"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
+						"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
+						"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
+						"--" + flags.FormatFN, flagsTD.FormatType,
+
+						// bool flags
+						"--" + flags.ArchiveFN,
+					},
+					flagsTD.PreparedProviderFlags(),
+					flagsTD.PreparedStorageFlags()))
+
+			cliTD.CheckCmdChild(
+				t,
+				parent,
+				3,
+				test.expectUse,
+				test.expectShort,
+				test.expectRunE)

 			opts := utils.MakeOneDriveOpts(cmd)
-			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

+			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 			assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
 			assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
 			assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter)
 			assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
 			assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
 			assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
 			assert.Equal(t, flagsTD.CorsoPassphrase, flags.CorsoPassphraseFV)
 			flagsTD.AssertStorageFlags(t, cmd)
 		})
 	}

View File

@@ -1,17 +1,15 @@
 package export

 import (
-	"bytes"
 	"testing"

-	"github.com/alcionai/clues"
 	"github.com/spf13/cobra"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/tester"
 )
@@ -39,63 +37,50 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
 	for _, test := range table {
 		suite.Run(test.name, func() {
 			t := suite.T()

-			cmd := &cobra.Command{Use: test.use}
-
-			// persistent flags not added by addCommands
-			flags.AddRunModeFlag(cmd, true)
-
-			c := addSharePointCommands(cmd)
-			require.NotNil(t, c)
-
-			// non-persistent flags not added by addCommands
-			flags.AddAllProviderFlags(c)
-			flags.AddAllStorageFlags(c)
-
-			cmds := cmd.Commands()
-			require.Len(t, cmds, 1)
-
-			child := cmds[0]
-			assert.Equal(t, test.expectUse, child.Use)
-			assert.Equal(t, test.expectShort, child.Short)
-			tester.AreSameFunc(t, test.expectRunE, child.RunE)
-
-			flagsTD.WithFlags(
-				cmd,
-				sharePointServiceCommand,
-				[]string{
-					flagsTD.RestoreDestination,
-					"--" + flags.RunModeFN, flags.RunModeFlagTest,
-					"--" + flags.BackupFN, flagsTD.BackupInput,
-					"--" + flags.LibraryFN, flagsTD.LibraryInput,
-					"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
-					"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
-					"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
-					"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
-					"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
-					"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
-					"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
-					"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
-					"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
-					"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
-					"--" + flags.FormatFN, flagsTD.FormatType,
-					// bool flags
-					"--" + flags.ArchiveFN,
-				},
-				flagsTD.PreparedProviderFlags(),
-				flagsTD.PreparedStorageFlags())
-
-			cmd.SetOut(new(bytes.Buffer)) // drop output
-			cmd.SetErr(new(bytes.Buffer)) // drop output
-
-			err := cmd.Execute()
-			assert.NoError(t, err, clues.ToCore(err))
+			parent := &cobra.Command{Use: exportCommand}
+
+			cmd := cliTD.SetUpCmdHasFlags(
+				t,
+				parent,
+				addSharePointCommands,
+				[]cliTD.UseCobraCommandFn{
+					flags.AddAllProviderFlags,
+					flags.AddAllStorageFlags,
+				},
+				flagsTD.WithFlags(
+					sharePointServiceCommand,
+					[]string{
+						flagsTD.RestoreDestination,
+						"--" + flags.RunModeFN, flags.RunModeFlagTest,
+						"--" + flags.BackupFN, flagsTD.BackupInput,
+						"--" + flags.LibraryFN, flagsTD.LibraryInput,
+						"--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
+						"--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
+						"--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
+						"--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
+						"--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
+						"--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
+						"--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
+						"--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
+						"--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
+						"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
+						"--" + flags.FormatFN, flagsTD.FormatType,
+						"--" + flags.ArchiveFN,
+					},
+					flagsTD.PreparedProviderFlags(),
+					flagsTD.PreparedStorageFlags()))
+
+			cliTD.CheckCmdChild(
+				t,
+				parent,
+				3,
+				test.expectUse,
+				test.expectShort,
+				test.expectRunE)

 			opts := utils.MakeSharePointOpts(cmd)
-			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)

+			assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 			assert.Equal(t, flagsTD.LibraryInput, opts.Library)
 			assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
 			assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@@ -103,16 +88,12 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
 			assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
 			assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
 			assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
 			assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem)
 			assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder)
 			assert.ElementsMatch(t, flagsTD.PageInput, opts.Page)
 			assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder)
 			assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive)
 			assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format)
 			flagsTD.AssertStorageFlags(t, cmd)
 		})
 	}

View File

@@ -3,9 +3,10 @@ package testdata

 import (
 	"testing"

-	"github.com/alcionai/corso/src/cli/flags"
 	"github.com/spf13/cobra"
 	"gotest.tools/v3/assert"
+
+	"github.com/alcionai/corso/src/cli/flags"
 )

 func PreparedBackupListFlags() []string {

View File

@@ -86,7 +86,7 @@ var (
 	DisableConcurrencyLimiter = true
 )

-func WithFlags(
+func WithFlags2(
 	cc *cobra.Command,
 	command string,
 	flagSets ...[]string,
@@ -99,3 +99,18 @@ func WithFlags(
 	cc.SetArgs(args)
 }
+
+func WithFlags(
+	command string,
+	flagSets ...[]string,
+) func(*cobra.Command) {
+	return func(cc *cobra.Command) {
+		args := []string{command}
+		for _, sl := range flagSets {
+			args = append(args, sl...)
+		}
+
+		cc.SetArgs(args)
+	}
+}
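
The new WithFlags is a closure-returning variant of the old imperative version (kept as WithFlags2): instead of mutating a command immediately, it packages the args and applies them later, which is what lets the test helpers accept it as a setup function. A usage sketch, with the literal flag values as placeholders:

// Because WithFlags now returns a func(*cobra.Command), the args can be
// prepared up front and applied after the command tree is built.
apply := WithFlags(
	"onedrive",
	[]string{"--backup", "bid-1234"})

cmd := &cobra.Command{Use: "details"}
apply(cmd) // equivalent to cmd.SetArgs([]string{"onedrive", "--backup", "bid-1234"})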

View File

@@ -85,7 +85,7 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
 	opt := utils.ControlWithConfig(cfg)

 	// Retention is not supported for filesystem repos.
-	retention := ctrlRepo.Retention{}
+	retentionOpts := ctrlRepo.Retention{}

 	// SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated
 	utils.SendStartCorsoEvent(
@@ -96,13 +96,11 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
 		cfg.Account.ID(),
 		opt)

-	sc, err := cfg.Storage.StorageConfig()
+	storageCfg, err := cfg.Storage.ToFilesystemConfig()
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))
 	}

-	storageCfg := sc.(*storage.FilesystemConfig)
-
 	m365, err := cfg.Account.M365Config()
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@@ -118,19 +116,27 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
 		return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
 	}

-	if err = r.Initialize(ctx, retention); err != nil {
+	ric := repository.InitConfig{RetentionOpts: retentionOpts}
+
+	if err = r.Initialize(ctx, ric); err != nil {
 		if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
 			return nil
 		}

-		return Only(ctx, clues.Wrap(err, "Failed to initialize a new filesystem repository"))
+		return Only(ctx, clues.Stack(ErrInitializingRepo, err))
 	}

 	defer utils.CloseRepo(ctx, r)

 	Infof(ctx, "Initialized a repository at path %s", storageCfg.Path)

-	if err = config.WriteRepoConfig(ctx, sc, m365, opt.Repo, r.GetID()); err != nil {
+	err = config.WriteRepoConfig(
+		ctx,
+		storageCfg,
+		m365,
+		opt.Repo,
+		r.GetID())
+	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to write repository configuration"))
 	}
@@ -181,13 +187,11 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
 		repoID = events.RepoIDNotFound
 	}

-	sc, err := cfg.Storage.StorageConfig()
+	storageCfg, err := cfg.Storage.ToFilesystemConfig()
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))
 	}

-	storageCfg := sc.(*storage.FilesystemConfig)
-
 	m365, err := cfg.Account.M365Config()
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@@ -205,15 +209,21 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
 		return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
 	}

-	if err := r.Connect(ctx); err != nil {
-		return Only(ctx, clues.Wrap(err, "Failed to connect to the filesystem repository"))
+	if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
+		return Only(ctx, clues.Stack(ErrConnectingRepo, err))
 	}

 	defer utils.CloseRepo(ctx, r)

 	Infof(ctx, "Connected to repository at path %s", storageCfg.Path)

-	if err = config.WriteRepoConfig(ctx, sc, m365, opts.Repo, r.GetID()); err != nil {
+	err = config.WriteRepoConfig(
+		ctx,
+		storageCfg,
+		m365,
+		opts.Repo,
+		r.GetID())
+	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to write repository configuration"))
 	}

View File

@@ -16,7 +16,6 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/storage"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -56,9 +55,8 @@ func (suite *FilesystemE2ESuite) TestInitFilesystemCmd() {
 	st := storeTD.NewFilesystemStorage(t)

-	sc, err := st.StorageConfig()
+	cfg, err := st.ToFilesystemConfig()
 	require.NoError(t, err, clues.ToCore(err))

-	cfg := sc.(*storage.FilesystemConfig)
-
 	force := map[string]string{
 		tconfig.TestCfgStorageProvider: storage.ProviderFilesystem.String(),
@@ -113,9 +111,8 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() {
 	defer flush()

 	st := storeTD.NewFilesystemStorage(t)

-	sc, err := st.StorageConfig()
+	cfg, err := st.ToFilesystemConfig()
 	require.NoError(t, err, clues.ToCore(err))

-	cfg := sc.(*storage.FilesystemConfig)
-
 	force := map[string]string{
 		tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
@@ -134,13 +131,13 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() {
 	// init the repo first
 	r, err := repository.New(
 		ctx,
-		account.Account{},
+		tconfig.NewM365Account(t),
 		st,
 		control.DefaultOptions(),
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = r.Initialize(ctx, ctrlRepo.Retention{})
+	err = r.Initialize(ctx, repository.InitConfig{})
 	require.NoError(t, err, clues.ToCore(err))

 	// then test it

View File

@@ -21,6 +21,11 @@ const (
 	maintenanceCommand = "maintenance"
 )

+var (
+	ErrConnectingRepo   = clues.New("connecting repository")
+	ErrInitializingRepo = clues.New("initializing repository")
+)
+
 var repoCommands = []func(cmd *cobra.Command) *cobra.Command{
 	addS3Commands,
 	addFilesystemCommands,
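
These sentinel errors pair with the clues.Stack calls introduced in the repo commands: stacking keeps the original cause while letting callers match the failure category. A hedged illustration of the intent (the surrounding wiring is invented for the example):

// Stacking a sentinel atop the concrete error preserves both: errors.Is can
// match the category, and the concrete cause still prints in the chain.
if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
	wrapped := clues.Stack(ErrConnectingRepo, err)

	if errors.Is(wrapped, ErrConnectingRepo) {
		// matches: Stack keeps the sentinel in the error chain
	}

	return Only(ctx, wrapped)
}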

View File

@@ -116,13 +116,11 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
 		cfg.Account.ID(),
 		opt)

-	sc, err := cfg.Storage.StorageConfig()
+	s3Cfg, err := cfg.Storage.ToS3Config()
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))
 	}

-	s3Cfg := sc.(*storage.S3Config)
-
 	if strings.HasPrefix(s3Cfg.Endpoint, "http://") || strings.HasPrefix(s3Cfg.Endpoint, "https://") {
 		invalidEndpointErr := "endpoint doesn't support specifying protocol. " +
 			"pass --disable-tls flag to use http:// instead of default https://"
@@ -145,12 +143,14 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
 		return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
 	}

-	if err = r.Initialize(ctx, retentionOpts); err != nil {
+	ric := repository.InitConfig{RetentionOpts: retentionOpts}
+
+	if err = r.Initialize(ctx, ric); err != nil {
 		if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
 			return nil
 		}

-		return Only(ctx, clues.Wrap(err, "Failed to initialize a new S3 repository"))
+		return Only(ctx, clues.Stack(ErrInitializingRepo, err))
 	}

 	defer utils.CloseRepo(ctx, r)
@@ -199,13 +199,11 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
 		repoID = events.RepoIDNotFound
 	}

-	sc, err := cfg.Storage.StorageConfig()
+	s3Cfg, err := cfg.Storage.ToS3Config()
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))
 	}

-	s3Cfg := sc.(*storage.S3Config)
-
 	m365, err := cfg.Account.M365Config()
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
@@ -230,8 +228,8 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
 		return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
 	}

-	if err := r.Connect(ctx); err != nil {
-		return Only(ctx, clues.Wrap(err, "Failed to connect to the S3 repository"))
+	if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
+		return Only(ctx, clues.Stack(ErrConnectingRepo, err))
 	}

 	defer utils.CloseRepo(ctx, r)
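
Both repo backends now pass config structs to Initialize and Connect instead of positional options, so new knobs can be added without touching every call site. A sketch of the types as these call sites imply them; any field beyond RetentionOpts is an assumption:

// InitConfig/ConnConfig (shapes inferred from the call sites above).
type InitConfig struct {
	RetentionOpts ctrlRepo.Retention
	// future options can land here without breaking callers
}

type ConnConfig struct {
	// currently passed as the zero value at these call sites
}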

View File

@@ -8,15 +8,16 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
+	"golang.org/x/exp/maps"

 	"github.com/alcionai/corso/src/cli"
 	"github.com/alcionai/corso/src/cli/config"
 	cliTD "github.com/alcionai/corso/src/cli/testdata"
+	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/storage"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -64,9 +65,8 @@ func (suite *S3E2ESuite) TestInitS3Cmd() {
 	st := storeTD.NewPrefixedS3Storage(t)

-	sc, err := st.StorageConfig()
+	cfg, err := st.ToS3Config()
 	require.NoError(t, err, clues.ToCore(err))

-	cfg := sc.(*storage.S3Config)
-
 	vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
 	if !test.hasConfigFile {
@@ -102,10 +102,9 @@ func (suite *S3E2ESuite) TestInitMultipleTimes() {
 	defer flush()

 	st := storeTD.NewPrefixedS3Storage(t)

-	sc, err := st.StorageConfig()
-	require.NoError(t, err, clues.ToCore(err))
-
-	cfg := sc.(*storage.S3Config)
+	cfg, err := st.ToS3Config()
+	require.NoError(t, err, clues.ToCore(err))

 	vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
@@ -134,11 +133,9 @@ func (suite *S3E2ESuite) TestInitS3Cmd_missingBucket() {
 	st := storeTD.NewPrefixedS3Storage(t)

-	sc, err := st.StorageConfig()
+	cfg, err := st.ToS3Config()
 	require.NoError(t, err, clues.ToCore(err))

-	cfg := sc.(*storage.S3Config)
-
 	force := map[string]string{
 		tconfig.TestCfgBucket: "",
 	}
@@ -189,9 +186,9 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
 	defer flush()

 	st := storeTD.NewPrefixedS3Storage(t)

-	sc, err := st.StorageConfig()
+	cfg, err := st.ToS3Config()
 	require.NoError(t, err, clues.ToCore(err))

-	cfg := sc.(*storage.S3Config)
-
 	force := map[string]string{
 		tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
@@ -210,13 +207,13 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
 	// init the repo first
 	r, err := repository.New(
 		ctx,
-		account.Account{},
+		tconfig.NewM365Account(t),
 		st,
 		control.DefaultOptions(),
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = r.Initialize(ctx, ctrlRepo.Retention{})
+	err = r.Initialize(ctx, repository.InitConfig{})
 	require.NoError(t, err, clues.ToCore(err))

 	// then test it
// then test it // then test it
@@ -234,60 +231,65 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
   }
 }
-func (suite *S3E2ESuite) TestConnectS3Cmd_BadBucket() {
-  t := suite.T()
-  ctx, flush := tester.NewContext(t)
-  defer flush()
-  st := storeTD.NewPrefixedS3Storage(t)
-  sc, err := st.StorageConfig()
-  require.NoError(t, err, clues.ToCore(err))
-  cfg := sc.(*storage.S3Config)
-  vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
-  ctx = config.SetViper(ctx, vpr)
-  cmd := cliTD.StubRootCmd(
-    "repo", "connect", "s3",
-    "--config-file", configFP,
-    "--bucket", "wrong",
-    "--prefix", cfg.Prefix)
-  cli.BuildCommandTree(cmd)
-  // run the command
-  err = cmd.ExecuteContext(ctx)
-  require.Error(t, err, clues.ToCore(err))
-}
-func (suite *S3E2ESuite) TestConnectS3Cmd_BadPrefix() {
-  t := suite.T()
-  ctx, flush := tester.NewContext(t)
-  defer flush()
-  st := storeTD.NewPrefixedS3Storage(t)
-  sc, err := st.StorageConfig()
-  require.NoError(t, err, clues.ToCore(err))
-  cfg := sc.(*storage.S3Config)
-  vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil)
-  ctx = config.SetViper(ctx, vpr)
-  cmd := cliTD.StubRootCmd(
-    "repo", "connect", "s3",
-    "--config-file", configFP,
-    "--bucket", cfg.Bucket,
-    "--prefix", "wrong")
-  cli.BuildCommandTree(cmd)
-  // run the command
-  err = cmd.ExecuteContext(ctx)
-  require.Error(t, err, clues.ToCore(err))
-}
+func (suite *S3E2ESuite) TestConnectS3Cmd_badInputs() {
+  table := []struct {
+    name      string
+    bucket    string
+    prefix    string
+    expectErr func(t *testing.T, err error)
+  }{
+    {
+      name:   "bucket",
+      bucket: "wrong",
+      expectErr: func(t *testing.T, err error) {
+        assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err))
+      },
+    },
+    {
+      name:   "prefix",
+      prefix: "wrong",
+      expectErr: func(t *testing.T, err error) {
+        assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err))
+      },
+    },
+  }
+  for _, test := range table {
+    suite.Run(test.name, func() {
+      t := suite.T()
+      ctx, flush := tester.NewContext(t)
+      defer flush()
+      st := storeTD.NewPrefixedS3Storage(t)
+      cfg, err := st.ToS3Config()
+      require.NoError(t, err, clues.ToCore(err))
+      bucket := str.First(test.bucket, cfg.Bucket)
+      prefix := str.First(test.prefix, cfg.Prefix)
+      over := map[string]string{}
+      acct := tconfig.NewM365Account(t)
+      maps.Copy(over, acct.Config)
+      over[account.AccountProviderTypeKey] = account.ProviderM365.String()
+      over[storage.StorageProviderTypeKey] = storage.ProviderS3.String()
+      vpr, configFP := tconfig.MakeTempTestConfigClone(t, over)
+      ctx = config.SetViper(ctx, vpr)
+      cmd := cliTD.StubRootCmd(
+        "repo", "connect", "s3",
+        "--config-file", configFP,
+        "--bucket", bucket,
+        "--prefix", prefix)
+      cli.BuildCommandTree(cmd)
+      // run the command
+      err = cmd.ExecuteContext(ctx)
+      require.Error(t, err, clues.ToCore(err))
+      test.expectErr(t, err)
+    })
+  }
 }
 func (suite *S3E2ESuite) TestUpdateS3Cmd() {
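In the table test above, each case overrides only one of bucket or prefix, and str.First falls back to the valid config value for the other. A sketch of the assumed helper behavior (the actual implementation is in internal/common/str and may handle more cases):

package str

// First returns the first value with a non-zero length; assumed
// behavior based on how the test uses it, not the verbatim source.
func First(vs ...string) string {
  for _, v := range vs {
    if len(v) > 0 {
      return v
    }
  }

  return ""
}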
@@ -20,7 +20,6 @@ import (
   "github.com/alcionai/corso/src/internal/tester/tconfig"
   "github.com/alcionai/corso/src/pkg/account"
   "github.com/alcionai/corso/src/pkg/control"
-  ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
   "github.com/alcionai/corso/src/pkg/path"
   "github.com/alcionai/corso/src/pkg/repository"
   "github.com/alcionai/corso/src/pkg/selectors"
@@ -66,11 +65,9 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
   suite.acct = tconfig.NewM365Account(t)
   suite.st = storeTD.NewPrefixedS3Storage(t)
-  sc, err := suite.st.StorageConfig()
+  cfg, err := suite.st.ToS3Config()
   require.NoError(t, err, clues.ToCore(err))
-  cfg := sc.(*storage.S3Config)
   force := map[string]string{
     tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
     tconfig.TestCfgStorageProvider: storage.ProviderS3.String(),
@@ -94,7 +91,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
     repository.NewRepoID)
   require.NoError(t, err, clues.ToCore(err))
-  err = suite.repo.Initialize(ctx, ctrlRepo.Retention{})
+  err = suite.repo.Initialize(ctx, repository.InitConfig{Service: path.ExchangeService})
   require.NoError(t, err, clues.ToCore(err))
   suite.backupOps = make(map[path.CategoryType]operations.BackupOperation)
@@ -1,17 +1,15 @@
 package restore
 import (
-  "bytes"
   "testing"
-  "github.com/alcionai/clues"
   "github.com/spf13/cobra"
   "github.com/stretchr/testify/assert"
-  "github.com/stretchr/testify/require"
   "github.com/stretchr/testify/suite"
   "github.com/alcionai/corso/src/cli/flags"
   flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+  cliTD "github.com/alcionai/corso/src/cli/testdata"
   "github.com/alcionai/corso/src/cli/utils"
   "github.com/alcionai/corso/src/internal/tester"
 )
@@ -39,80 +37,64 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
   for _, test := range table {
     suite.Run(test.name, func() {
       t := suite.T()
-      cmd := &cobra.Command{Use: test.use}
-      // persistent flags not added by addCommands
-      flags.AddRunModeFlag(cmd, true)
-      c := addExchangeCommands(cmd)
-      require.NotNil(t, c)
-      // non-persistent flags not added by addCommands
-      flags.AddAllProviderFlags(c)
-      flags.AddAllStorageFlags(c)
-      cmds := cmd.Commands()
-      require.Len(t, cmds, 1)
-      child := cmds[0]
-      assert.Equal(t, test.expectUse, child.Use)
-      assert.Equal(t, test.expectShort, child.Short)
-      tester.AreSameFunc(t, test.expectRunE, child.RunE)
-      flagsTD.WithFlags(
-        cmd,
-        exchangeServiceCommand,
-        []string{
-          "--" + flags.RunModeFN, flags.RunModeFlagTest,
-          "--" + flags.BackupFN, flagsTD.BackupInput,
-          "--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput),
-          "--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput),
-          "--" + flags.ContactNameFN, flagsTD.ContactNameInput,
-          "--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput),
-          "--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput),
-          "--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput,
-          "--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput,
-          "--" + flags.EmailSenderFN, flagsTD.EmailSenderInput,
-          "--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput,
-          "--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput),
-          "--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput),
-          "--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput,
-          "--" + flags.EventRecursFN, flagsTD.EventRecursInput,
-          "--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput,
-          "--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput,
-          "--" + flags.EventSubjectFN, flagsTD.EventSubjectInput,
-          "--" + flags.CollisionsFN, flagsTD.Collisions,
-          "--" + flags.DestinationFN, flagsTD.Destination,
-          "--" + flags.ToResourceFN, flagsTD.ToResource,
-        },
-        flagsTD.PreparedProviderFlags(),
-        flagsTD.PreparedStorageFlags())
-      cmd.SetOut(new(bytes.Buffer)) // drop output
-      cmd.SetErr(new(bytes.Buffer)) // drop output
-      err := cmd.Execute()
-      assert.NoError(t, err, clues.ToCore(err))
+      parent := &cobra.Command{Use: restoreCommand}
+      cmd := cliTD.SetUpCmdHasFlags(
+        t,
+        parent,
+        addExchangeCommands,
+        []cliTD.UseCobraCommandFn{
+          flags.AddAllProviderFlags,
+          flags.AddAllStorageFlags,
+        },
+        flagsTD.WithFlags(
+          exchangeServiceCommand,
+          []string{
+            "--" + flags.RunModeFN, flags.RunModeFlagTest,
+            "--" + flags.BackupFN, flagsTD.BackupInput,
+            "--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput),
+            "--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput),
+            "--" + flags.ContactNameFN, flagsTD.ContactNameInput,
+            "--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput),
+            "--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput),
+            "--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput,
+            "--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput,
+            "--" + flags.EmailSenderFN, flagsTD.EmailSenderInput,
+            "--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput,
+            "--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput),
+            "--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput),
+            "--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput,
+            "--" + flags.EventRecursFN, flagsTD.EventRecursInput,
+            "--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput,
+            "--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput,
+            "--" + flags.EventSubjectFN, flagsTD.EventSubjectInput,
+            "--" + flags.CollisionsFN, flagsTD.Collisions,
+            "--" + flags.DestinationFN, flagsTD.Destination,
+            "--" + flags.ToResourceFN, flagsTD.ToResource,
+          },
+          flagsTD.PreparedProviderFlags(),
+          flagsTD.PreparedStorageFlags()))
+      cliTD.CheckCmdChild(
+        t,
+        parent,
+        3,
+        test.expectUse,
+        test.expectShort,
+        test.expectRunE)
       opts := utils.MakeExchangeOpts(cmd)
       assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
       assert.ElementsMatch(t, flagsTD.ContactInput, opts.Contact)
       assert.ElementsMatch(t, flagsTD.ContactFldInput, opts.ContactFolder)
       assert.Equal(t, flagsTD.ContactNameInput, opts.ContactName)
       assert.ElementsMatch(t, flagsTD.EmailInput, opts.Email)
       assert.ElementsMatch(t, flagsTD.EmailFldInput, opts.EmailFolder)
       assert.Equal(t, flagsTD.EmailReceivedAfterInput, opts.EmailReceivedAfter)
       assert.Equal(t, flagsTD.EmailReceivedBeforeInput, opts.EmailReceivedBefore)
       assert.Equal(t, flagsTD.EmailSenderInput, opts.EmailSender)
       assert.Equal(t, flagsTD.EmailSubjectInput, opts.EmailSubject)
       assert.ElementsMatch(t, flagsTD.EventInput, opts.Event)
       assert.ElementsMatch(t, flagsTD.EventCalInput, opts.EventCalendar)
       assert.Equal(t, flagsTD.EventOrganizerInput, opts.EventOrganizer)
@@ -120,11 +102,9 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
       assert.Equal(t, flagsTD.EventStartsAfterInput, opts.EventStartsAfter)
       assert.Equal(t, flagsTD.EventStartsBeforeInput, opts.EventStartsBefore)
       assert.Equal(t, flagsTD.EventSubjectInput, opts.EventSubject)
       assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
       assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
       assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
       flagsTD.AssertProviderFlags(t, cmd)
       flagsTD.AssertStorageFlags(t, cmd)
     })
@@ -1,17 +1,15 @@
 package restore
 import (
-  "bytes"
   "testing"
-  "github.com/alcionai/clues"
   "github.com/spf13/cobra"
   "github.com/stretchr/testify/assert"
-  "github.com/stretchr/testify/require"
   "github.com/stretchr/testify/suite"
   "github.com/alcionai/corso/src/cli/flags"
   flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+  cliTD "github.com/alcionai/corso/src/cli/testdata"
   "github.com/alcionai/corso/src/cli/utils"
   "github.com/alcionai/corso/src/internal/tester"
 )
@@ -39,65 +37,51 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
   for _, test := range table {
     suite.Run(test.name, func() {
       t := suite.T()
-      cmd := &cobra.Command{Use: test.use}
-      // persistent flags not added by addCommands
-      flags.AddRunModeFlag(cmd, true)
-      c := addGroupsCommands(cmd)
-      require.NotNil(t, c)
-      // non-persistent flags not added by addCommands
-      flags.AddAllProviderFlags(c)
-      flags.AddAllStorageFlags(c)
-      cmds := cmd.Commands()
-      require.Len(t, cmds, 1)
-      child := cmds[0]
-      assert.Equal(t, test.expectUse, child.Use)
-      assert.Equal(t, test.expectShort, child.Short)
-      tester.AreSameFunc(t, test.expectRunE, child.RunE)
-      flagsTD.WithFlags(
-        cmd,
-        groupsServiceCommand,
-        []string{
-          "--" + flags.RunModeFN, flags.RunModeFlagTest,
-          "--" + flags.BackupFN, flagsTD.BackupInput,
-          "--" + flags.LibraryFN, flagsTD.LibraryInput,
-          "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
-          "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
-          "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
-          "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
-          "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
-          "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
-          "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
-          "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
-          "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
-          "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
-          "--" + flags.CollisionsFN, flagsTD.Collisions,
-          "--" + flags.DestinationFN, flagsTD.Destination,
-          "--" + flags.ToResourceFN, flagsTD.ToResource,
-          // bool flags
-          "--" + flags.NoPermissionsFN,
-        },
-        flagsTD.PreparedProviderFlags(),
-        flagsTD.PreparedStorageFlags())
-      cmd.SetOut(new(bytes.Buffer)) // drop output
-      cmd.SetErr(new(bytes.Buffer)) // drop output
-      err := cmd.Execute()
-      assert.NoError(t, err, clues.ToCore(err))
+      parent := &cobra.Command{Use: restoreCommand}
+      cmd := cliTD.SetUpCmdHasFlags(
+        t,
+        parent,
+        addGroupsCommands,
+        []cliTD.UseCobraCommandFn{
+          flags.AddAllProviderFlags,
+          flags.AddAllStorageFlags,
+        },
+        flagsTD.WithFlags(
+          groupsServiceCommand,
+          []string{
+            "--" + flags.RunModeFN, flags.RunModeFlagTest,
+            "--" + flags.BackupFN, flagsTD.BackupInput,
+            "--" + flags.LibraryFN, flagsTD.LibraryInput,
+            "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
+            "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
+            "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
+            "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
+            "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
+            "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
+            "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
+            "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
+            "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
+            "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
+            "--" + flags.CollisionsFN, flagsTD.Collisions,
+            "--" + flags.DestinationFN, flagsTD.Destination,
+            "--" + flags.ToResourceFN, flagsTD.ToResource,
+            "--" + flags.NoPermissionsFN,
+          },
+          flagsTD.PreparedProviderFlags(),
+          flagsTD.PreparedStorageFlags()))
+      cliTD.CheckCmdChild(
+        t,
+        parent,
+        3,
+        test.expectUse,
+        test.expectShort,
+        test.expectRunE)
       opts := utils.MakeGroupsOpts(cmd)
       assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
       assert.Equal(t, flagsTD.LibraryInput, opts.Library)
       assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
       assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@@ -105,14 +89,10 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
       assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
       assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
       assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
       assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
       assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
       assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
-      // bool flags
       assert.True(t, flags.NoPermissionsFV)
       flagsTD.AssertProviderFlags(t, cmd)
       flagsTD.AssertStorageFlags(t, cmd)
     })
@@ -1,17 +1,15 @@
 package restore
 import (
-  "bytes"
   "testing"
-  "github.com/alcionai/clues"
   "github.com/spf13/cobra"
   "github.com/stretchr/testify/assert"
-  "github.com/stretchr/testify/require"
   "github.com/stretchr/testify/suite"
   "github.com/alcionai/corso/src/cli/flags"
   flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+  cliTD "github.com/alcionai/corso/src/cli/testdata"
   "github.com/alcionai/corso/src/cli/utils"
   "github.com/alcionai/corso/src/internal/tester"
 )
@@ -39,73 +37,56 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
   for _, test := range table {
     suite.Run(test.name, func() {
       t := suite.T()
-      cmd := &cobra.Command{Use: test.use}
-      // persistent flags not added by addCommands
-      flags.AddRunModeFlag(cmd, true)
-      c := addOneDriveCommands(cmd)
-      require.NotNil(t, c)
-      // non-persistent flags not added by addCommands
-      flags.AddAllProviderFlags(c)
-      flags.AddAllStorageFlags(c)
-      cmds := cmd.Commands()
-      require.Len(t, cmds, 1)
-      child := cmds[0]
-      assert.Equal(t, test.expectUse, child.Use)
-      assert.Equal(t, test.expectShort, child.Short)
-      tester.AreSameFunc(t, test.expectRunE, child.RunE)
-      flagsTD.WithFlags(
-        cmd,
-        oneDriveServiceCommand,
-        []string{
-          "--" + flags.RunModeFN, flags.RunModeFlagTest,
-          "--" + flags.BackupFN, flagsTD.BackupInput,
-          "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
-          "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
-          "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
-          "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
-          "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
-          "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
-          "--" + flags.CollisionsFN, flagsTD.Collisions,
-          "--" + flags.DestinationFN, flagsTD.Destination,
-          "--" + flags.ToResourceFN, flagsTD.ToResource,
-          // bool flags
-          "--" + flags.NoPermissionsFN,
-        },
-        flagsTD.PreparedProviderFlags(),
-        flagsTD.PreparedStorageFlags())
-      cmd.SetOut(new(bytes.Buffer)) // drop output
-      cmd.SetErr(new(bytes.Buffer)) // drop output
-      err := cmd.Execute()
-      assert.NoError(t, err, clues.ToCore(err))
+      parent := &cobra.Command{Use: restoreCommand}
+      cmd := cliTD.SetUpCmdHasFlags(
+        t,
+        parent,
+        addOneDriveCommands,
+        []cliTD.UseCobraCommandFn{
+          flags.AddAllProviderFlags,
+          flags.AddAllStorageFlags,
+        },
+        flagsTD.WithFlags(
+          oneDriveServiceCommand,
+          []string{
+            "--" + flags.RunModeFN, flags.RunModeFlagTest,
+            "--" + flags.BackupFN, flagsTD.BackupInput,
+            "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
+            "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
+            "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
+            "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
+            "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
+            "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
+            "--" + flags.CollisionsFN, flagsTD.Collisions,
+            "--" + flags.DestinationFN, flagsTD.Destination,
+            "--" + flags.ToResourceFN, flagsTD.ToResource,
+            "--" + flags.NoPermissionsFN,
+          },
+          flagsTD.PreparedProviderFlags(),
+          flagsTD.PreparedStorageFlags()))
+      cliTD.CheckCmdChild(
+        t,
+        parent,
+        3,
+        test.expectUse,
+        test.expectShort,
+        test.expectRunE)
       opts := utils.MakeOneDriveOpts(cmd)
       assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
       assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
       assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
       assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter)
       assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
       assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
       assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
       assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
       assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
       assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
-      // bool flags
       assert.True(t, flags.NoPermissionsFV)
       flagsTD.AssertProviderFlags(t, cmd)
       flagsTD.AssertStorageFlags(t, cmd)
     })
@@ -25,12 +25,12 @@ var restoreCommands = []func(cmd *cobra.Command) *cobra.Command{
 // AddCommands attaches all `corso restore * *` commands to the parent.
 func AddCommands(cmd *cobra.Command) {
   subCommand := restoreCmd()
-  flags.AddAllProviderFlags(subCommand)
-  flags.AddAllStorageFlags(subCommand)
   cmd.AddCommand(subCommand)
   for _, addRestoreTo := range restoreCommands {
-    addRestoreTo(subCommand)
+    sc := addRestoreTo(subCommand)
+    flags.AddAllProviderFlags(sc)
+    flags.AddAllStorageFlags(sc)
   }
 }
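The hunk above moves provider and storage flag registration off the shared restore command and onto each child returned by its add-function. Since cobra's Flags() set is local to the command it is registered on, each subcommand now owns an independent flag set. A self-contained sketch of the pattern (command and flag names here are illustrative, not from the repo):

package main

import "github.com/spf13/cobra"

// addVerbose registers a local flag on exactly the command it receives.
func addVerbose(c *cobra.Command) {
  c.Flags().Bool("verbose", false, "enable verbose output")
}

func main() {
  parent := &cobra.Command{Use: "restore"}

  adders := []func(*cobra.Command) *cobra.Command{
    func(p *cobra.Command) *cobra.Command {
      child := &cobra.Command{Use: "exchange"}
      p.AddCommand(child)

      return child
    },
  }

  // mirrors the new AddCommands shape: flags go on the child
  // returned by each adder, not on the parent.
  for _, add := range adders {
    sc := add(parent)
    addVerbose(sc)
  }

  _ = parent.Execute()
}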
@@ -1,17 +1,15 @@
 package restore
 import (
-  "bytes"
   "testing"
-  "github.com/alcionai/clues"
   "github.com/spf13/cobra"
   "github.com/stretchr/testify/assert"
-  "github.com/stretchr/testify/require"
   "github.com/stretchr/testify/suite"
   "github.com/alcionai/corso/src/cli/flags"
   flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+  cliTD "github.com/alcionai/corso/src/cli/testdata"
   "github.com/alcionai/corso/src/cli/utils"
   "github.com/alcionai/corso/src/internal/tester"
 )
@@ -39,64 +37,51 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
   for _, test := range table {
     suite.Run(test.name, func() {
       t := suite.T()
-      cmd := &cobra.Command{Use: test.use}
-      // persistent flags not added by addCommands
-      flags.AddRunModeFlag(cmd, true)
-      c := addSharePointCommands(cmd)
-      require.NotNil(t, c)
-      // non-persistent flags not added by addCommands
-      flags.AddAllProviderFlags(c)
-      flags.AddAllStorageFlags(c)
-      cmds := cmd.Commands()
-      require.Len(t, cmds, 1)
-      child := cmds[0]
-      assert.Equal(t, test.expectUse, child.Use)
-      assert.Equal(t, test.expectShort, child.Short)
-      tester.AreSameFunc(t, test.expectRunE, child.RunE)
-      flagsTD.WithFlags(
-        cmd,
-        sharePointServiceCommand,
-        []string{
-          "--" + flags.RunModeFN, flags.RunModeFlagTest,
-          "--" + flags.BackupFN, flagsTD.BackupInput,
-          "--" + flags.LibraryFN, flagsTD.LibraryInput,
-          "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
-          "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
-          "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
-          "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
-          "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
-          "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
-          "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
-          "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
-          "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
-          "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
-          "--" + flags.CollisionsFN, flagsTD.Collisions,
-          "--" + flags.DestinationFN, flagsTD.Destination,
-          "--" + flags.ToResourceFN, flagsTD.ToResource,
-          // bool flags
-          "--" + flags.NoPermissionsFN,
-        },
-        flagsTD.PreparedProviderFlags(),
-        flagsTD.PreparedStorageFlags())
-      cmd.SetOut(new(bytes.Buffer)) // drop output
-      cmd.SetErr(new(bytes.Buffer)) // drop output
-      err := cmd.Execute()
-      assert.NoError(t, err, clues.ToCore(err))
+      parent := &cobra.Command{Use: restoreCommand}
+      cmd := cliTD.SetUpCmdHasFlags(
+        t,
+        parent,
+        addSharePointCommands,
+        []cliTD.UseCobraCommandFn{
+          flags.AddAllProviderFlags,
+          flags.AddAllStorageFlags,
+        },
+        flagsTD.WithFlags(
+          sharePointServiceCommand,
+          []string{
+            "--" + flags.RunModeFN, flags.RunModeFlagTest,
+            "--" + flags.BackupFN, flagsTD.BackupInput,
+            "--" + flags.LibraryFN, flagsTD.LibraryInput,
+            "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput),
+            "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput),
+            "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput,
+            "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput,
+            "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput,
+            "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput,
+            "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput),
+            "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput),
+            "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput),
+            "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
+            "--" + flags.CollisionsFN, flagsTD.Collisions,
+            "--" + flags.DestinationFN, flagsTD.Destination,
+            "--" + flags.ToResourceFN, flagsTD.ToResource,
+            "--" + flags.NoPermissionsFN,
+          },
+          flagsTD.PreparedProviderFlags(),
+          flagsTD.PreparedStorageFlags()))
+      cliTD.CheckCmdChild(
+        t,
+        parent,
+        3,
+        test.expectUse,
+        test.expectShort,
+        test.expectRunE)
       opts := utils.MakeSharePointOpts(cmd)
       assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
       assert.Equal(t, flagsTD.LibraryInput, opts.Library)
       assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName)
       assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath)
@@ -104,20 +89,14 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
       assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore)
       assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter)
       assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
       assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem)
       assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder)
       assert.ElementsMatch(t, flagsTD.PageInput, opts.Page)
       assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder)
       assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
       assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
       assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
-      // bool flags
       assert.True(t, flags.NoPermissionsFV)
       flagsTD.AssertProviderFlags(t, cmd)
       flagsTD.AssertStorageFlags(t, cmd)
     })
@@ -1,11 +1,20 @@
 package testdata
 import (
+  "bytes"
   "fmt"
+  "strings"
+  "testing"
   "time"
+  "github.com/alcionai/clues"
   "github.com/google/uuid"
   "github.com/spf13/cobra"
+  "github.com/stretchr/testify/assert"
+  "github.com/stretchr/testify/require"
+  "github.com/alcionai/corso/src/cli/flags"
+  "github.com/alcionai/corso/src/internal/tester"
 )
 // StubRootCmd builds a stub cobra command to be used as
@@ -27,3 +36,82 @@ func StubRootCmd(args ...string) *cobra.Command {
   return c
 }
+
+type UseCobraCommandFn func(*cobra.Command)
+
+func SetUpCmdHasFlags(
+  t *testing.T,
+  parentCmd *cobra.Command,
+  addChildCommand func(*cobra.Command) *cobra.Command,
+  addFlags []UseCobraCommandFn,
+  setArgs UseCobraCommandFn,
+) *cobra.Command {
+  parentCmd.PersistentPreRun = func(c *cobra.Command, args []string) {
+    t.Log("testing args:")
+
+    for _, arg := range args {
+      t.Log(arg)
+    }
+  }
+
+  // persistent flags not added by addCommands
+  flags.AddRunModeFlag(parentCmd, true)
+
+  cmd := addChildCommand(parentCmd)
+  require.NotNil(t, cmd)
+
+  cul := cmd.UseLine()
+
+  require.Truef(
+    t,
+    strings.HasPrefix(cul, parentCmd.Use+" "+cmd.Use),
+    "child command has expected usage format 'parent child', got %q",
+    cul)
+
+  for _, af := range addFlags {
+    af(cmd)
+  }
+
+  setArgs(parentCmd)
+
+  parentCmd.SetOut(new(bytes.Buffer)) // drop output
+  parentCmd.SetErr(new(bytes.Buffer)) // drop output
+
+  err := parentCmd.Execute()
+  assert.NoError(t, err, clues.ToCore(err))
+
+  return cmd
+}
+
+type CobraRunEFn func(cmd *cobra.Command, args []string) error
+
+func CheckCmdChild(
+  t *testing.T,
+  cmd *cobra.Command,
+  expectChildCount int,
+  expectUse string,
+  expectShort string,
+  expectRunE CobraRunEFn,
+) {
+  var (
+    cmds  = cmd.Commands()
+    child *cobra.Command
+  )
+
+  for _, cc := range cmds {
+    if cc.Use == expectUse {
+      child = cc
+      break
+    }
+  }
+
+  require.Len(
+    t,
+    cmds,
+    expectChildCount,
+    "parent command should have the correct child command count")
+  require.NotNil(t, child, "should have found expected child command")
+  assert.Equal(t, expectShort, child.Short)
+  tester.AreSameFunc(t, expectRunE, child.RunE)
+}
@@ -78,16 +78,10 @@ func GetAccountAndConnectWithOverrides(
     return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller")
   }
-  if err := r.Connect(ctx); err != nil {
+  if err := r.Connect(ctx, repository.ConnConfig{Service: pst}); err != nil {
     return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository")
   }
-  // this initializes our graph api client configurations,
-  // including control options such as concurency limitations.
-  if _, err := r.ConnectToM365(ctx, pst); err != nil {
-    return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365")
-  }
   rdao := RepoDetailsAndOpts{
     Repo: cfg,
     Opts: opts,
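From the call sites visible in this diff, Connect and Initialize now accept small config structs that carry the service being operated on. A sketch of the shape implied here; any additional fields on the real types are not visible in this section:

package repository

import "github.com/alcionai/corso/src/pkg/path"

// Inferred from r.Connect(ctx, repository.ConnConfig{Service: pst}) and
// r.Initialize(ctx, repository.InitConfig{Service: path.ExchangeService}).
type ConnConfig struct {
  Service path.ServiceType
}

type InitConfig struct {
  Service path.ServiceType
}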
@@ -72,7 +72,7 @@ func deleteBackups(
 // Only supported for S3 repos currently.
 func pitrListBackups(
   ctx context.Context,
-  service path.ServiceType,
+  pst path.ServiceType,
   pitr time.Time,
   backupIDs []string,
 ) error {
@@ -113,14 +113,14 @@ func pitrListBackups(
     return clues.Wrap(err, "creating a repo")
   }
-  err = r.Connect(ctx)
+  err = r.Connect(ctx, repository.ConnConfig{Service: pst})
   if err != nil {
     return clues.Wrap(err, "connecting to the repository")
   }
   defer r.Close(ctx)
-  backups, err := r.BackupsByTag(ctx, store.Service(service))
+  backups, err := r.BackupsByTag(ctx, store.Service(pst))
   if err != nil {
     return clues.Wrap(err, "listing backups").WithClues(ctx)
   }
@@ -197,13 +197,11 @@ func handleCheckerCommand(cmd *cobra.Command, args []string, f flags) error {
     return clues.Wrap(err, "getting storage config")
   }
-  sc, err := repoDetails.Storage.StorageConfig()
+  cfg, err := repoDetails.Storage.ToS3Config()
   if err != nil {
     return clues.Wrap(err, "getting S3 config")
   }
-  cfg := sc.(*storage.S3Config)
   endpoint := defaultS3Endpoint
   if len(cfg.Endpoint) > 0 {
     endpoint = cfg.Endpoint
@@ -1,6 +1,68 @@
 package common
+
+import (
+  "context"
+  "fmt"
+  "os"
+  "strings"
+  "time"
+
+  "github.com/alcionai/corso/src/internal/tester/tconfig"
+  "github.com/alcionai/corso/src/pkg/account"
+  "github.com/alcionai/corso/src/pkg/control"
+  "github.com/alcionai/corso/src/pkg/credentials"
+  "github.com/alcionai/corso/src/pkg/logger"
+  "github.com/alcionai/corso/src/pkg/services/m365/api"
+)
 type PermissionInfo struct {
   EntityID string
   Roles    []string
 }
+
+const (
+  sanityBaseBackup  = "SANITY_BASE_BACKUP"
+  sanityTestData    = "SANITY_TEST_DATA"
+  sanityTestFolder  = "SANITY_TEST_FOLDER"
+  sanityTestService = "SANITY_TEST_SERVICE"
+)
+
+type Envs struct {
+  BaseBackupFolder string
+  DataFolder       string
+  FolderName       string
+  Service          string
+  SiteID           string
+  StartTime        time.Time
+  UserID           string
+}
+
+func EnvVars(ctx context.Context) Envs {
+  folder := strings.TrimSpace(os.Getenv(sanityTestFolder))
+  startTime, _ := MustGetTimeFromName(ctx, folder)
+
+  e := Envs{
+    BaseBackupFolder: os.Getenv(sanityBaseBackup),
+    DataFolder:       os.Getenv(sanityTestData),
+    FolderName:       folder,
+    SiteID:           tconfig.GetM365SiteID(ctx),
+    Service:          os.Getenv(sanityTestService),
+    StartTime:        startTime,
+    UserID:           tconfig.GetM365UserID(ctx),
+  }
+
+  fmt.Printf("\n-----\nenvs %+v\n-----\n", e)
+  logger.Ctx(ctx).Info("envs", e)
+
+  return e
+}
+
+func GetAC() (api.Client, error) {
+  creds := account.M365Config{
+    M365:          credentials.GetM365(),
+    AzureTenantID: os.Getenv(account.AzureTenantID),
+  }
+
+  return api.NewClient(creds, control.DefaultOptions())
+}
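EnvVars and GetAC centralize the configuration that the CI workflow exports through the SANITY_* environment variables. A minimal caller sketch, assuming those variables (and the M365 credentials) are already set in the environment:

package main

import (
  "context"
  "fmt"

  "github.com/alcionai/corso/src/cmd/sanity_test/common"
)

func main() {
  ctx := context.Background()

  // reads SANITY_TEST_FOLDER, SANITY_TEST_DATA, SANITY_BASE_BACKUP,
  // and SANITY_TEST_SERVICE, plus the test user and site IDs.
  envs := common.EnvVars(ctx)

  ac, err := common.GetAC()
  if err != nil {
    common.Fatal(ctx, "building api client", err)
  }

  fmt.Println("checking", envs.Service, "in folder", envs.FolderName)
  _ = ac
}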
@@ -0,0 +1,38 @@
+package common
+
+import (
+  "os"
+  "path/filepath"
+  "time"
+
+  "github.com/alcionai/clues"
+)
+
+func FilepathWalker(
+  folderName string,
+  exportFileSizes map[string]int64,
+  startTime time.Time,
+) filepath.WalkFunc {
+  return func(path string, info os.FileInfo, err error) error {
+    if err != nil {
+      return clues.Stack(err)
+    }
+
+    if info.IsDir() {
+      return nil
+    }
+
+    relPath, err := filepath.Rel(folderName, path)
+    if err != nil {
+      return clues.Stack(err)
+    }
+
+    exportFileSizes[relPath] = info.Size()
+
+    if startTime.After(info.ModTime()) {
+      startTime = info.ModTime()
+    }
+
+    return nil
+  }
+}
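A usage sketch for the walker (the export path below is illustrative). One caveat worth noting: startTime is received by value, so updates made inside the closure are not visible to the caller's variable after the walk; the export checks later in this commit also do not consume the adjusted value.

package main

import (
  "fmt"
  "path/filepath"
  "time"

  "github.com/alcionai/corso/src/cmd/sanity_test/common"
)

func main() {
  sizes := map[string]int64{}
  root := "/tmp/export-exchange-first" // illustrative path

  // collects relative-path -> size for every file under root.
  err := filepath.Walk(root, common.FilepathWalker(root, sizes, time.Now()))
  if err != nil {
    fmt.Println("walk error:", err)
  }

  fmt.Println("collected sizes:", sizes)
}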
@@ -0,0 +1,69 @@
+package common
+
+import (
+  "context"
+
+  "golang.org/x/exp/maps"
+)
+
+// Sanitree is used to build out a hierarchical tree of items
+// for comparison against each other.  Primarily so that a restore
+// can compare two subtrees easily.
+type Sanitree[T any] struct {
+  Container     T
+  ContainerID   string
+  ContainerName string
+  // non-containers only
+  ContainsItems int
+  // name -> node
+  Children map[string]*Sanitree[T]
+}
+
+func AssertEqualTrees[T any](
+  ctx context.Context,
+  expect, other *Sanitree[T],
+) {
+  if expect == nil && other == nil {
+    return
+  }
+
+  Assert(
+    ctx,
+    func() bool { return expect != nil && other != nil },
+    "non nil nodes",
+    expect,
+    other)
+
+  Assert(
+    ctx,
+    func() bool { return expect.ContainerName == other.ContainerName },
+    "container names match",
+    expect.ContainerName,
+    other.ContainerName)
+
+  Assert(
+    ctx,
+    func() bool { return expect.ContainsItems == other.ContainsItems },
+    "count of items in container matches",
+    expect.ContainsItems,
+    other.ContainsItems)
+
+  Assert(
+    ctx,
+    func() bool { return len(expect.Children) == len(other.Children) },
+    "count of child containers matches",
+    len(expect.Children),
+    len(other.Children))
+
+  for name, s := range expect.Children {
+    ch, ok := other.Children[name]
+
+    Assert(
+      ctx,
+      func() bool { return ok },
+      "found matching child container",
+      name,
+      maps.Keys(other.Children))
+
+    AssertEqualTrees(ctx, s, ch)
+  }
+}
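A small self-contained sketch of the comparison the tree enables; string containers are used purely for illustration, while the sanity tests in this commit instantiate it with models.MailFolderable:

package main

import (
  "context"

  "github.com/alcionai/corso/src/cmd/sanity_test/common"
)

func main() {
  ctx := context.Background()

  node := func(name string, items int) *common.Sanitree[string] {
    return &common.Sanitree[string]{
      Container:     name,
      ContainerID:   "id-" + name,
      ContainerName: name,
      ContainsItems: items,
      Children:      map[string]*common.Sanitree[string]{},
    }
  }

  expect := node("inbox", 3)
  expect.Children["sub"] = node("sub", 1)

  other := node("inbox", 3)
  other.Children["sub"] = node("sub", 1)

  // compares names, item counts, and child sets, recursing by child name.
  common.AssertEqualTrees(ctx, expect, other)
}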
@@ -22,7 +22,7 @@ func Assert(
     return
   }
-  header = "Error: " + header
+  header = "TEST FAILURE: " + header
   expected := fmt.Sprintf("* Expected: %+v", expect)
   got := fmt.Sprintf("* Current: %+v", current)
@@ -37,7 +37,7 @@ func Assert(
 func Fatal(ctx context.Context, msg string, err error) {
   logger.CtxErr(ctx, err).Error("test failure: " + msg)
-  fmt.Println(msg+": ", err)
+  fmt.Println("TEST FAILURE: "+msg+": ", err)
   os.Exit(1)
 }
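For reference, Assert takes a predicate plus the expected and current values for the failure report; a minimal call, mirroring the AssertEqualTrees call sites above:

package main

import (
  "context"

  "github.com/alcionai/corso/src/cmd/sanity_test/common"
)

func main() {
  ctx := context.Background()
  want, got := 3, 3

  // emits a "TEST FAILURE: ..." report with the expected/current
  // values when the predicate returns false.
  common.Assert(
    ctx,
    func() bool { return got == want },
    "item counts match",
    want,
    got)
}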
@@ -0,0 +1,16 @@
+package export
+
+import (
+  "context"
+
+  "github.com/alcionai/corso/src/cmd/sanity_test/common"
+  "github.com/alcionai/corso/src/pkg/services/m365/api"
+)
+
+func CheckGroupsExport(
+  ctx context.Context,
+  ac api.Client,
+  envs common.Envs,
+) {
+  // TODO
+}
@@ -3,28 +3,21 @@ package export
 import (
   "context"
   "fmt"
-  "os"
   "path/filepath"
   "time"
-  "github.com/alcionai/clues"
-  msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
   "github.com/alcionai/corso/src/cmd/sanity_test/common"
   "github.com/alcionai/corso/src/cmd/sanity_test/restore"
   "github.com/alcionai/corso/src/internal/common/ptr"
+  "github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 func CheckOneDriveExport(
   ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
-  userID, folderName, dataFolder string,
+  ac api.Client,
+  envs common.Envs,
 ) {
-  drive, err := client.
-    Users().
-    ByUserId(userID).
-    Drive().
-    Get(ctx, nil)
+  drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID)
   if err != nil {
     common.Fatal(ctx, "getting the drive:", err)
   }
@@ -36,37 +29,19 @@ func CheckOneDriveExport(
     startTime = time.Now()
   )
-  err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
-    if err != nil {
-      return clues.Stack(err)
-    }
-    if info.IsDir() {
-      return nil
-    }
-    relPath, err := filepath.Rel(folderName, path)
-    if err != nil {
-      return clues.Stack(err)
-    }
-    exportFileSizes[relPath] = info.Size()
-    if startTime.After(info.ModTime()) {
-      startTime = info.ModTime()
-    }
-    return nil
-  })
+  err = filepath.Walk(
+    envs.FolderName,
+    common.FilepathWalker(envs.FolderName, exportFileSizes, startTime))
   if err != nil {
     fmt.Println("Error walking the path:", err)
   }
   _ = restore.PopulateDriveDetails(
     ctx,
-    client,
+    ac,
     ptr.Val(drive.GetId()),
-    folderName,
-    dataFolder,
+    envs.FolderName,
+    envs.DataFolder,
     fileSizes,
     map[string][]common.PermissionInfo{},
     startTime)
@@ -3,28 +3,21 @@ package export
 import (
   "context"
   "fmt"
-  "os"
   "path/filepath"
   "time"
-  "github.com/alcionai/clues"
-  msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
   "github.com/alcionai/corso/src/cmd/sanity_test/common"
   "github.com/alcionai/corso/src/cmd/sanity_test/restore"
   "github.com/alcionai/corso/src/internal/common/ptr"
+  "github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 func CheckSharePointExport(
   ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
-  siteID, folderName, dataFolder string,
+  ac api.Client,
+  envs common.Envs,
 ) {
-  drive, err := client.
-    Sites().
-    BySiteId(siteID).
-    Drive().
-    Get(ctx, nil)
+  drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID)
   if err != nil {
     common.Fatal(ctx, "getting the drive:", err)
   }
@@ -36,37 +29,19 @@ func CheckSharePointExport(
     startTime = time.Now()
   )
-  err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
-    if err != nil {
-      return clues.Stack(err)
-    }
-    if info.IsDir() {
-      return nil
-    }
-    relPath, err := filepath.Rel(folderName, path)
-    if err != nil {
-      return clues.Stack(err)
-    }
-    exportFileSizes[relPath] = info.Size()
-    if startTime.After(info.ModTime()) {
-      startTime = info.ModTime()
-    }
-    return nil
-  })
+  err = filepath.Walk(
+    envs.FolderName,
+    common.FilepathWalker(envs.FolderName, exportFileSizes, startTime))
   if err != nil {
     fmt.Println("Error walking the path:", err)
   }
   _ = restore.PopulateDriveDetails(
     ctx,
-    client,
+    ac,
     ptr.Val(drive.GetId()),
-    folderName,
-    dataFolder,
+    envs.FolderName,
+    envs.DataFolder,
     fileSizes,
     map[string][]common.PermissionInfo{},
     startTime)
@@ -3,99 +3,43 @@ package restore
 import (
   "context"
   "fmt"
-  stdpath "path"
-  "strings"
-  "time"
   "github.com/alcionai/clues"
-  msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
   "github.com/microsoftgraph/msgraph-sdk-go/models"
-  "github.com/microsoftgraph/msgraph-sdk-go/users"
   "github.com/alcionai/corso/src/cmd/sanity_test/common"
   "github.com/alcionai/corso/src/internal/common/ptr"
-  "github.com/alcionai/corso/src/pkg/filters"
+  "github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 // CheckEmailRestoration verifies that the emails count in restored folder is equivalent to
 // emails in actual m365 account
 func CheckEmailRestoration(
   ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
-  testUser, folderName, dataFolder, baseBackupFolder string,
-  startTime time.Time,
+  ac api.Client,
+  envs common.Envs,
 ) {
   var (
-    restoreFolder    models.MailFolderable
-    itemCount        = make(map[string]int32)
-    restoreItemCount = make(map[string]int32)
-    builder          = client.Users().ByUserId(testUser).MailFolders()
+    folderNameToItemCount        = make(map[string]int32)
+    folderNameToRestoreItemCount = make(map[string]int32)
   )
-  for {
-    result, err := builder.Get(ctx, nil)
-    if err != nil {
-      common.Fatal(ctx, "getting mail folders", err)
-    }
-    values := result.GetValue()
-    for _, v := range values {
-      itemName := ptr.Val(v.GetDisplayName())
-      if itemName == folderName {
-        restoreFolder = v
-        continue
-      }
-      if itemName == dataFolder || itemName == baseBackupFolder {
-        // otherwise, recursively aggregate all child folders.
-        getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount)
-        itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
-      }
-    }
-    link, ok := ptr.ValOK(result.GetOdataNextLink())
-    if !ok {
-      break
-    }
-    builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
-  }
-  folderID := ptr.Val(restoreFolder.GetId())
-  folderName = ptr.Val(restoreFolder.GetDisplayName())
+  restoredTree := buildSanitree(ctx, ac, envs.UserID, envs.FolderName)
+  dataTree := buildSanitree(ctx, ac, envs.UserID, envs.DataFolder)
   ctx = clues.Add(
     ctx,
-    "restore_folder_id", folderID,
-    "restore_folder_name", folderName)
-  childFolder, err := client.
-    Users().
-    ByUserId(testUser).
-    MailFolders().
-    ByMailFolderId(folderID).
-    ChildFolders().
-    Get(ctx, nil)
-  if err != nil {
-    common.Fatal(ctx, "getting restore folder child folders", err)
-  }
-  for _, fld := range childFolder.GetValue() {
-    restoreDisplayName := ptr.Val(fld.GetDisplayName())
-    // check if folder is the data folder we loaded or the base backup to verify
-    // the incremental backup worked fine
-    if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
-      count, _ := ptr.ValOK(fld.GetTotalItemCount())
-      restoreItemCount[restoreDisplayName] = count
-      checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
-    }
-  }
-  verifyEmailData(ctx, restoreItemCount, itemCount)
+    "restore_folder_id", restoredTree.ContainerID,
+    "restore_folder_name", restoredTree.ContainerName,
+    "original_folder_id", dataTree.ContainerID,
+    "original_folder_name", dataTree.ContainerName)
+  verifyEmailData(ctx, folderNameToRestoreItemCount, folderNameToItemCount)
+  common.AssertEqualTrees[models.MailFolderable](
+    ctx,
+    dataTree,
+    restoredTree.Children[envs.DataFolder])
 }
 func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
@@ -111,109 +55,71 @@ func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[
   }
 }
-// getAllSubFolder will recursively check for all subfolders and get the corresponding
-// email count.
-func getAllMailSubFolders(
-  ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
-  testUser string,
-  r models.MailFolderable,
-  parentFolder,
-  dataFolder string,
-  messageCount map[string]int32,
-) {
-  var (
-    folderID       = ptr.Val(r.GetId())
-    count    int32 = 99
-    options        = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
-      QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
-        Top: &count,
-      },
-    }
-  )
-  ctx = clues.Add(ctx, "parent_folder_id", folderID)
-  childFolder, err := client.
-    Users().
-    ByUserId(testUser).
-    MailFolders().
-    ByMailFolderId(folderID).
-    ChildFolders().
-    Get(ctx, options)
-  if err != nil {
-    common.Fatal(ctx, "getting mail subfolders", err)
-  }
-  for _, child := range childFolder.GetValue() {
-    var (
-      childDisplayName = ptr.Val(child.GetDisplayName())
-      childFolderCount = ptr.Val(child.GetChildFolderCount())
-      //nolint:forbidigo
-      fullFolderName = stdpath.Join(parentFolder, childDisplayName)
-    )
-    if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
-      messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
-      // recursively check for subfolders
-      if childFolderCount > 0 {
-        parentFolder := fullFolderName
-        getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
-      }
-    }
-  }
-}
-// checkAllSubFolder will recursively traverse inside the restore folder and
-// verify that data matched in all subfolders
-func checkAllSubFolder(
-  ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
-  r models.MailFolderable,
-  testUser,
-  parentFolder,
-  dataFolder string,
-  restoreMessageCount map[string]int32,
-) {
-  var (
-    folderID       = ptr.Val(r.GetId())
-    count    int32 = 99
-    options        = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
-      QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
-        Top: &count,
-      },
-    }
-  )
-  childFolder, err := client.
-    Users().
-    ByUserId(testUser).
-    MailFolders().
-    ByMailFolderId(folderID).
-    ChildFolders().
-    Get(ctx, options)
-  if err != nil {
-    common.Fatal(ctx, "getting mail subfolders", err)
-  }
-  for _, child := range childFolder.GetValue() {
-    var (
-      childDisplayName = ptr.Val(child.GetDisplayName())
-      //nolint:forbidigo
-      fullFolderName = stdpath.Join(parentFolder, childDisplayName)
-    )
-    if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
-      childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
-      restoreMessageCount[fullFolderName] = childTotalCount
-    }
-    childFolderCount := ptr.Val(child.GetChildFolderCount())
-    if childFolderCount > 0 {
-      parentFolder := fullFolderName
-      checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
-    }
-  }
-}
+func buildSanitree(
+  ctx context.Context,
+  ac api.Client,
+  userID, folderName string,
+) *common.Sanitree[models.MailFolderable] {
+  gcc, err := ac.Mail().GetContainerByName(
+    ctx,
+    userID,
+    api.MsgFolderRoot,
+    folderName)
+  if err != nil {
+    common.Fatal(
+      ctx,
+      fmt.Sprintf("finding folder by name %q", folderName),
+      err)
+  }
+  mmf, ok := gcc.(models.MailFolderable)
+  if !ok {
+    common.Fatal(
+      ctx,
+      "mail folderable required",
+      clues.New("casting "+*gcc.GetDisplayName()+" to models.MailFolderable"))
+  }
+  root := &common.Sanitree[models.MailFolderable]{
+    Container:     mmf,
+    ContainerID:   ptr.Val(mmf.GetId()),
+    ContainerName: ptr.Val(mmf.GetDisplayName()),
+    ContainsItems: int(ptr.Val(mmf.GetTotalItemCount())),
+    Children:      map[string]*common.Sanitree[models.MailFolderable]{},
+  }
+  recurseSubfolders(ctx, ac, root, userID)
+  return root
+}
+func recurseSubfolders(
+  ctx context.Context,
+  ac api.Client,
+  parent *common.Sanitree[models.MailFolderable],
+  userID string,
+) {
+  childFolders, err := ac.Mail().GetContainerChildren(
+    ctx,
+    userID,
+    parent.ContainerID)
+  if err != nil {
+    common.Fatal(ctx, "getting subfolders", err)
+  }
+  for _, child := range childFolders {
+    c := &common.Sanitree[models.MailFolderable]{
+      Container:     child,
+      ContainerID:   ptr.Val(child.GetId()),
+      ContainerName: ptr.Val(child.GetDisplayName()),
+      ContainsItems: int(ptr.Val(child.GetTotalItemCount())),
+      Children:      map[string]*common.Sanitree[models.MailFolderable]{},
+    }
+    parent.Children[c.ContainerName] = c
+    if ptr.Val(child.GetChildFolderCount()) > 0 {
+      recurseSubfolders(ctx, ac, c, userID)
+    }
+  }
+}
@@ -0,0 +1,16 @@
+package restore
+
+import (
+  "context"
+
+  "github.com/alcionai/corso/src/cmd/sanity_test/common"
+  "github.com/alcionai/corso/src/pkg/services/m365/api"
+)
+
+func CheckGroupsRestoration(
+  ctx context.Context,
+  ac api.Client,
+  envs common.Envs,
+) {
+  // TODO
+}
@@ -7,12 +7,12 @@ import (
   "time"
   "github.com/alcionai/clues"
-  msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
   "golang.org/x/exp/slices"
   "github.com/alcionai/corso/src/cmd/sanity_test/common"
   "github.com/alcionai/corso/src/internal/common/ptr"
   "github.com/alcionai/corso/src/pkg/path"
+  "github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 const (
@@ -21,34 +21,29 @@ const (
 func CheckOneDriveRestoration(
   ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
-  userID, folderName, dataFolder string,
-  startTime time.Time,
+  ac api.Client,
+  envs common.Envs,
 ) {
-  drive, err := client.
-    Users().
-    ByUserId(userID).
-    Drive().
-    Get(ctx, nil)
+  drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID)
   if err != nil {
     common.Fatal(ctx, "getting the drive:", err)
   }
   checkDriveRestoration(
     ctx,
-    client,
+    ac,
     path.OneDriveService,
-    folderName,
+    envs.FolderName,
     ptr.Val(drive.GetId()),
     ptr.Val(drive.GetName()),
-    dataFolder,
-    startTime,
+    envs.DataFolder,
+    envs.StartTime,
     false)
 }
 func checkDriveRestoration(
   ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
+  ac api.Client,
   service path.ServiceType,
   folderName,
   driveID,
@@ -70,7 +65,7 @@ func checkDriveRestoration(
   restoreFolderID := PopulateDriveDetails(
     ctx,
-    client,
+    ac,
     driveID,
     folderName,
     dataFolder,
@@ -78,7 +73,14 @@ func checkDriveRestoration(
     folderPermissions,
     startTime)
-  getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime)
+  getRestoredDrive(
+    ctx,
+    ac,
+    driveID,
+    restoreFolderID,
+    restoreFile,
+    restoredFolderPermissions,
+    startTime)
   checkRestoredDriveItemPermissions(
     ctx,
@@ -105,7 +107,7 @@ func checkDriveRestoration(
 func PopulateDriveDetails(
   ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
+  ac api.Client,
   driveID, folderName, dataFolder string,
   fileSizes map[string]int64,
   folderPermissions map[string][]common.PermissionInfo,
@@ -113,18 +115,12 @@ func PopulateDriveDetails(
 ) string {
   var restoreFolderID string
-  response, err := client.
-    Drives().
-    ByDriveId(driveID).
-    Items().
-    ByDriveItemId("root").
-    Children().
-    Get(ctx, nil)
+  children, err := ac.Drives().GetFolderChildren(ctx, driveID, "root")
   if err != nil {
     common.Fatal(ctx, "getting drive by id", err)
   }
-  for _, driveItem := range response.GetValue() {
+  for _, driveItem := range children {
     var (
       itemID   = ptr.Val(driveItem.GetId())
       itemName = ptr.Val(driveItem.GetName())
@@ -156,8 +152,17 @@ func PopulateDriveDetails(
       continue
     }
-    folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID)
-    getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime)
+    folderPermissions[itemName] = permissionIn(ctx, ac, driveID, itemID)
+    getOneDriveChildFolder(
+      ctx,
+      ac,
+      driveID,
+      itemID,
+      itemName,
+      fileSizes,
+      folderPermissions,
+      startTime)
   }
   return restoreFolderID
@@ -228,18 +233,18 @@ func checkRestoredDriveItemPermissions(
 func getOneDriveChildFolder(
   ctx context.Context,
-  client *msgraphsdk.GraphServiceClient,
+  ac api.Client,
   driveID, itemID, parentName string,
   fileSizes map[string]int64,
   folderPermission map[string][]common.PermissionInfo,
   startTime time.Time,
 ) {
-  response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil)
+  children, err := ac.Drives().GetFolderChildren(ctx, driveID, itemID)
   if err != nil {
     common.Fatal(ctx, "getting child folder", err)
   }
-  for _, driveItem := range response.GetValue() {
+  for _, driveItem := range children {
     var (
       itemID   = ptr.Val(driveItem.GetId())
       itemName = ptr.Val(driveItem.GetName())
@@ -268,31 +273,33 @@ func getOneDriveChildFolder(
       continue
     }
-    folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID)
-    getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
+    folderPermission[fullName] = permissionIn(ctx, ac, driveID, itemID)
+    getOneDriveChildFolder(
+      ctx,
+      ac,
+      driveID,
+      itemID,
+      fullName,
+      fileSizes,
+      folderPermission,
+      startTime)
   }
 }
func getRestoredDrive( func getRestoredDrive(
ctx context.Context, ctx context.Context,
client *msgraphsdk.GraphServiceClient, ac api.Client,
driveID, restoreFolderID string, driveID, restoreFolderID string,
restoreFile map[string]int64, restoreFile map[string]int64,
restoreFolder map[string][]common.PermissionInfo, restoreFolder map[string][]common.PermissionInfo,
startTime time.Time, startTime time.Time,
) { ) {
restored, err := client. children, err := ac.Drives().GetFolderChildren(ctx, driveID, restoreFolderID)
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(restoreFolderID).
Children().
Get(ctx, nil)
if err != nil { if err != nil {
common.Fatal(ctx, "getting child folder", err) common.Fatal(ctx, "getting child folder", err)
} }
for _, item := range restored.GetValue() { for _, item := range children {
var ( var (
itemID = ptr.Val(item.GetId()) itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName()) itemName = ptr.Val(item.GetName())
@ -308,8 +315,16 @@ func getRestoredDrive(
continue continue
} }
restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID) restoreFolder[itemName] = permissionIn(ctx, ac, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime) getOneDriveChildFolder(
ctx,
ac,
driveID,
itemID,
itemName,
restoreFile,
restoreFolder,
startTime)
} }
} }
@ -319,18 +334,12 @@ func getRestoredDrive(
func permissionIn( func permissionIn(
ctx context.Context, ctx context.Context,
client *msgraphsdk.GraphServiceClient, ac api.Client,
driveID, itemID string, driveID, itemID string,
) []common.PermissionInfo { ) []common.PermissionInfo {
pi := []common.PermissionInfo{} pi := []common.PermissionInfo{}
pcr, err := client. pcr, err := ac.Drives().GetItemPermission(ctx, driveID, itemID)
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Permissions().
Get(ctx, nil)
if err != nil { if err != nil {
common.Fatal(ctx, "getting permission", err) common.Fatal(ctx, "getting permission", err)
} }
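
Taken together, the hunks above swap every raw msgraph-sdk request chain for the project's api.Client wrapper. A minimal sketch of the resulting traversal shape, assuming the wrapper returns msgraph drive items (as the GetId/GetName accessors suggest); walkFolder is an illustrative name and file-size tracking is omitted:

// walkFolder condenses the getOneDriveChildFolder pattern: list children,
// record permissions for each child folder, then recurse into it.
func walkFolder(
	ctx context.Context,
	ac api.Client,
	driveID, itemID string,
	perms map[string][]common.PermissionInfo,
) {
	children, err := ac.Drives().GetFolderChildren(ctx, driveID, itemID)
	if err != nil {
		common.Fatal(ctx, "getting child folder", err)
	}

	for _, child := range children {
		// Files are handled separately; only folders carry the tracked
		// permission data.
		if child.GetFolder() == nil {
			continue
		}

		var (
			childID = ptr.Val(child.GetId())
			name    = ptr.Val(child.GetName())
		)

		perms[name] = permissionIn(ctx, ac, driveID, childID)
		walkFolder(ctx, ac, driveID, childID, perms)
	}
}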

View File

@ -2,38 +2,31 @@ package restore
import ( import (
"context" "context"
"time"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
) )
func CheckSharePointRestoration( func CheckSharePointRestoration(
ctx context.Context, ctx context.Context,
client *msgraphsdk.GraphServiceClient, ac api.Client,
siteID, userID, folderName, dataFolder string, envs common.Envs,
startTime time.Time,
) { ) {
drive, err := client. drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID)
Sites().
BySiteId(siteID).
Drive().
Get(ctx, nil)
if err != nil { if err != nil {
common.Fatal(ctx, "getting the drive:", err) common.Fatal(ctx, "getting the drive:", err)
} }
checkDriveRestoration( checkDriveRestoration(
ctx, ctx,
client, ac,
path.SharePointService, path.SharePointService,
folderName, envs.FolderName,
ptr.Val(drive.GetId()), ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()), ptr.Val(drive.GetName()),
dataFolder, envs.DataFolder,
startTime, envs.StartTime,
true) true)
} }

View File

@ -2,21 +2,40 @@ package main
import ( import (
"context" "context"
"fmt"
"os" "os"
"strings"
"time"
"github.com/alcionai/clues" "github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/spf13/cobra"
"github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/export" "github.com/alcionai/corso/src/cmd/sanity_test/export"
"github.com/alcionai/corso/src/cmd/sanity_test/restore" "github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
) )
// ---------------------------------------------------------------------------
// root command
// ---------------------------------------------------------------------------
func rootCMD() *cobra.Command {
return &cobra.Command{
Use: "sanity-test",
Short: "run the sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRoot,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
fmt.Println("running", cmd.UseLine())
},
}
}
func sanityTestRoot(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a kind of test"))
}
func main() { func main() {
ls := logger.Settings{ ls := logger.Settings{
File: logger.GetLogFile(""), File: logger.GetLogFile(""),
@ -29,60 +48,226 @@ func main() {
_ = log.Sync() // flush all logs in the buffer _ = log.Sync() // flush all logs in the buffer
}() }()
// TODO: only needed for exchange
graph.InitializeConcurrencyLimiter(ctx, true, 4) graph.InitializeConcurrencyLimiter(ctx, true, 4)
adapter, err := graph.CreateAdapter( root := rootCMD()
tconfig.GetM365TenantID(ctx),
os.Getenv("AZURE_CLIENT_ID"),
os.Getenv("AZURE_CLIENT_SECRET"))
if err != nil {
common.Fatal(ctx, "creating adapter", err)
}
var ( restCMD := restoreCMD()
client = msgraphsdk.NewGraphServiceClient(adapter)
testUser = tconfig.GetM365UserID(ctx)
testSite = tconfig.GetM365SiteID(ctx)
testKind = os.Getenv("SANITY_TEST_KIND") // restore or export (cli arg?)
testService = os.Getenv("SANITY_TEST_SERVICE")
folder = strings.TrimSpace(os.Getenv("SANITY_TEST_FOLDER"))
dataFolder = os.Getenv("TEST_DATA")
baseBackupFolder = os.Getenv("BASE_BACKUP")
)
ctx = clues.Add( restCMD.AddCommand(restoreExchangeCMD())
ctx, restCMD.AddCommand(restoreOneDriveCMD())
"resource_owner", testUser, restCMD.AddCommand(restoreSharePointCMD())
"service", testService, restCMD.AddCommand(restoreGroupsCMD())
"sanity_restore_folder", folder) root.AddCommand(restCMD)
logger.Ctx(ctx).Info("starting sanity test check") expCMD := exportCMD()
switch testKind { expCMD.AddCommand(exportOneDriveCMD())
case "restore": expCMD.AddCommand(exportSharePointCMD())
startTime, _ := common.MustGetTimeFromName(ctx, folder) expCMD.AddCommand(exportGroupsCMD())
clues.Add(ctx, "sanity_restore_start_time", startTime.Format(time.RFC3339)) root.AddCommand(expCMD)
switch testService { if err := root.Execute(); err != nil {
case "exchange": os.Exit(1)
restore.CheckEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
case "onedrive":
restore.CheckOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
case "sharepoint":
restore.CheckSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
default:
common.Fatal(ctx, "unknown service for restore sanity tests", nil)
}
case "export":
switch testService {
case "onedrive":
export.CheckOneDriveExport(ctx, client, testUser, folder, dataFolder)
case "sharepoint":
export.CheckSharePointExport(ctx, client, testSite, folder, dataFolder)
default:
common.Fatal(ctx, "unknown service for export sanity tests", nil)
}
default:
common.Fatal(ctx, "unknown test kind (expected restore or export)", nil)
} }
} }
// ---------------------------------------------------------------------------
// restore/export command
// ---------------------------------------------------------------------------
func exportCMD() *cobra.Command {
return &cobra.Command{
Use: "export",
Short: "run the post-export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExport,
}
}
func sanityTestExport(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a service"))
}
func restoreCMD() *cobra.Command {
return &cobra.Command{
Use: "restore",
Short: "run the post-restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestore,
}
}
func sanityTestRestore(cmd *cobra.Command, args []string) error {
return print.Only(cmd.Context(), clues.New("must specify a service"))
}
// ---------------------------------------------------------------------------
// service commands - export
// ---------------------------------------------------------------------------
func exportGroupsCMD() *cobra.Command {
return &cobra.Command{
Use: "groups",
Short: "run the groups export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportGroups,
}
}
func sanityTestExportGroups(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
export.CheckGroupsExport(ctx, ac, envs)
return nil
}
func exportOneDriveCMD() *cobra.Command {
return &cobra.Command{
Use: "onedrive",
Short: "run the onedrive export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportOneDrive,
}
}
func sanityTestExportOneDrive(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
export.CheckOneDriveExport(ctx, ac, envs)
return nil
}
func exportSharePointCMD() *cobra.Command {
return &cobra.Command{
Use: "sharepoint",
Short: "run the sharepoint export sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestExportSharePoint,
}
}
func sanityTestExportSharePoint(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
export.CheckSharePointExport(ctx, ac, envs)
return nil
}
// ---------------------------------------------------------------------------
// service commands - restore
// ---------------------------------------------------------------------------
func restoreExchangeCMD() *cobra.Command {
return &cobra.Command{
Use: "exchange",
Short: "run the exchange restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreExchange,
}
}
func sanityTestRestoreExchange(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckEmailRestoration(ctx, ac, envs)
return nil
}
func restoreOneDriveCMD() *cobra.Command {
return &cobra.Command{
Use: "onedrive",
Short: "run the onedrive restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreOneDrive,
}
}
func sanityTestRestoreOneDrive(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckOneDriveRestoration(ctx, ac, envs)
return nil
}
func restoreSharePointCMD() *cobra.Command {
return &cobra.Command{
Use: "sharepoint",
Short: "run the sharepoint restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreSharePoint,
}
}
func sanityTestRestoreSharePoint(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckSharePointRestoration(ctx, ac, envs)
return nil
}
func restoreGroupsCMD() *cobra.Command {
return &cobra.Command{
Use: "groups",
Short: "run the groups restore sanity tests",
DisableAutoGenTag: true,
RunE: sanityTestRestoreGroups,
}
}
func sanityTestRestoreGroups(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
envs := common.EnvVars(ctx)
ac, err := common.GetAC()
if err != nil {
return print.Only(ctx, err)
}
restore.CheckGroupsRestoration(ctx, ac, envs)
return nil
}
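
The eight service commands above differ only in their Use/Short strings and the check they call, and every check shares the (context.Context, api.Client, common.Envs) shape. A possible consolidation, sketched with the illustrative name serviceCMD (and assuming main additionally imports the m365 api package):

// serviceCMD is a hypothetical factory for the per-service boilerplate above.
func serviceCMD(
	use, short string,
	check func(context.Context, api.Client, common.Envs),
) *cobra.Command {
	return &cobra.Command{
		Use:               use,
		Short:             short,
		DisableAutoGenTag: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			envs := common.EnvVars(ctx)

			ac, err := common.GetAC()
			if err != nil {
				return print.Only(ctx, err)
			}

			check(ctx, ac, envs)

			return nil
		},
	}
}

With that helper, restCMD.AddCommand(serviceCMD("onedrive", "run the onedrive restore sanity tests", restore.CheckOneDriveRestoration)) replaces each hand-written pair, and the CLI surface (sanity-test restore onedrive, sanity-test export sharepoint, and so on) is unchanged.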

View File

@ -8,7 +8,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1
github.com/alcionai/clues v0.0.0-20230920212840-728ac1a1d8b8 github.com/alcionai/clues v0.0.0-20230920212840-728ac1a1d8b8
github.com/armon/go-metrics v0.4.1 github.com/armon/go-metrics v0.4.1
github.com/aws/aws-xray-sdk-go v1.8.1 github.com/aws/aws-xray-sdk-go v1.8.2
github.com/cenkalti/backoff/v4 v4.2.1 github.com/cenkalti/backoff/v4 v4.2.1
github.com/google/uuid v1.3.1 github.com/google/uuid v1.3.1
github.com/h2non/gock v1.2.0 github.com/h2non/gock v1.2.0

View File

@ -71,8 +71,8 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc= github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc=
github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.2 h1:PVxNWnQG+rAYjxsmhEN97DTO57Dipg6VS0wsu6bXUB0=
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/aws/aws-xray-sdk-go v1.8.2/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=

View File

@ -0,0 +1,187 @@
package readers
import (
"bytes"
"encoding/binary"
"io"
"os"
"unsafe"
"github.com/alcionai/clues"
)
// persistedSerializationVersion is the on-disk form of the serialization
// version.
//
// The current on-disk format of this field is written in big endian. The
// highest bit denotes if the item is empty because it was deleted between the
// time we told the storage about it and when we needed to get data for it. The
// lowest two bytes are the version number. All other bits are reserved for
// future use.
//
// MSB 31         30 ... 16    15 ... 0    LSB
// +----------+--------------+----------------+
// | del flag |   reserved   | version number |
// +----------+--------------+----------------+
type persistedSerializationVersion = uint32
// SerializationVersion is the in-memory form of the version number that gets
// embedded in the persisted serialization version.
//
// Right now it's only a uint16 but we can expand it to be larger so long as the
// expanded size doesn't clash with the flags in the high-order bits.
type SerializationVersion uint16
// DefaultSerializationVersion is the current (default) version number for all
// services. As services evolve their storage format they should begin tracking
// their own version numbers separate from other services.
const DefaultSerializationVersion SerializationVersion = 1
const (
VersionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0)))
delInFlightMask persistedSerializationVersion = 1 << ((VersionFormatSize * 8) - 1)
)
// SerializationFormat is a struct describing serialization format versions and
// flags to add for this item.
type SerializationFormat struct {
Version SerializationVersion
DelInFlight bool
}
// NewVersionedBackupReader creates a reader that injects format into the first
// bytes of the returned data. After format has been returned, data is returned
// from baseReaders in the order they're passed in.
func NewVersionedBackupReader(
format SerializationFormat,
baseReaders ...io.ReadCloser,
) (io.ReadCloser, error) {
if format.DelInFlight && len(baseReaders) > 0 {
// This is a conservative check, but we can always loosen it later on if
// needed. At the moment we really don't expect any data if the item was
// deleted.
return nil, clues.New("item marked deleted but has reader(s)")
}
formattedVersion := persistedSerializationVersion(format.Version)
if format.DelInFlight {
formattedVersion |= delInFlightMask
}
formattedBuf := make([]byte, VersionFormatSize)
binary.BigEndian.PutUint32(formattedBuf, formattedVersion)
versionReader := io.NopCloser(bytes.NewReader(formattedBuf))
// Need to add readers individually because types differ.
allReaders := make([]io.Reader, 0, len(baseReaders)+1)
allReaders = append(allReaders, versionReader)
for _, r := range baseReaders {
allReaders = append(allReaders, r)
}
res := &versionedBackupReader{
baseReaders: append([]io.ReadCloser{versionReader}, baseReaders...),
combined: io.MultiReader(allReaders...),
}
return res, nil
}
type versionedBackupReader struct {
// baseReaders is a reference to the original readers so we can close them.
baseReaders []io.ReadCloser
// combined is the reader that will return all data.
combined io.Reader
}
func (vbr *versionedBackupReader) Read(p []byte) (int, error) {
if vbr.combined == nil {
return 0, os.ErrClosed
}
n, err := vbr.combined.Read(p)
if err == io.EOF {
// io.EOF must be returned unwrapped: callers detect end-of-stream by
// comparing against it, so a wrapped EOF reads as a real error.
return n, err
}
return n, clues.Stack(err).OrNil()
}
func (vbr *versionedBackupReader) Close() error {
if vbr.combined == nil {
return nil
}
vbr.combined = nil
var errs *clues.Err
for i, r := range vbr.baseReaders {
if err := r.Close(); err != nil {
errs = clues.Stack(
errs,
clues.Wrap(err, "closing reader").With("reader_index", i))
}
}
vbr.baseReaders = nil
return errs.OrNil()
}
// NewVersionedRestoreReader wraps baseReader and provides easy access to the
// SerializationFormat info in the first bytes of the data contained in
// baseReader.
func NewVersionedRestoreReader(
baseReader io.ReadCloser,
) (*VersionedRestoreReader, error) {
versionBuf := make([]byte, VersionFormatSize)
// Loop to account for the unlikely case where we get a short read.
for read := 0; read < VersionFormatSize; {
n, err := baseReader.Read(versionBuf[read:])
if err != nil {
return nil, clues.Wrap(err, "reading serialization version")
}
read += n
}
formattedVersion := binary.BigEndian.Uint32(versionBuf)
return &VersionedRestoreReader{
baseReader: baseReader,
format: SerializationFormat{
Version: SerializationVersion(formattedVersion),
DelInFlight: (formattedVersion & delInFlightMask) != 0,
},
}, nil
}
type VersionedRestoreReader struct {
baseReader io.ReadCloser
format SerializationFormat
}
func (vrr *VersionedRestoreReader) Read(p []byte) (int, error) {
n, err := vrr.baseReader.Read(p)
if err == io.EOF {
// io.EOF must be returned unwrapped: callers detect end-of-stream by
// comparing against it, so a wrapped EOF reads as a real error.
return n, err
}
return n, clues.Stack(err).OrNil()
}
func (vrr *VersionedRestoreReader) Close() error {
return clues.Stack(vrr.baseReader.Close()).OrNil()
}
func (vrr VersionedRestoreReader) Format() SerializationFormat {
return vrr.format
}
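
A short round trip shows the wire format these readers produce and consume: a four-byte big-endian word, deleted flag in the top bit and version in the low sixteen bits, followed by the payload. A sketch using only the constructors above (imports: bytes, io, and this readers package):

// roundTripSketch serializes a payload with the default version, then
// reads the header and payload back.
func roundTripSketch() error {
	format := readers.SerializationFormat{
		Version: readers.DefaultSerializationVersion,
	}

	// Wire bytes: 0x00 0x00 0x00 0x01 followed by "hello world". With
	// DelInFlight set, the first byte becomes 0x80 and no payload follows.
	br, err := readers.NewVersionedBackupReader(
		format,
		io.NopCloser(bytes.NewReader([]byte("hello world"))))
	if err != nil {
		return err
	}

	rr, err := readers.NewVersionedRestoreReader(br)
	if err != nil {
		return err
	}
	defer rr.Close()

	// rr.Format().Version == readers.DefaultSerializationVersion
	// rr.Format().DelInFlight == false
	payload, err := io.ReadAll(rr) // "hello world"; header already consumed
	if err != nil {
		return err
	}

	_ = payload

	return nil
}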

View File

@ -0,0 +1,362 @@
package readers_test
import (
"bytes"
"io"
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/tester"
)
type shortReader struct {
maxReadLen int
io.ReadCloser
}
func (s *shortReader) Read(p []byte) (int, error) {
toRead := s.maxReadLen
if len(p) < toRead {
toRead = len(p)
}
return s.ReadCloser.Read(p[:toRead])
}
type SerializationReaderUnitSuite struct {
tester.Suite
}
func TestSerializationReaderUnitSuite(t *testing.T) {
suite.Run(t, &SerializationReaderUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader() {
baseData := []byte("hello world")
table := []struct {
name string
format readers.SerializationFormat
inputReaders []io.ReadCloser
expectErr require.ErrorAssertionFunc
expectData []byte
}{
{
name: "DeletedInFlight NoVersion NoReaders",
format: readers.SerializationFormat{
DelInFlight: true,
},
expectErr: require.NoError,
expectData: []byte{0x80, 0x0, 0x0, 0x0},
},
{
name: "DeletedInFlight NoReaders",
format: readers.SerializationFormat{
Version: 42,
DelInFlight: true,
},
expectErr: require.NoError,
expectData: []byte{0x80, 0x0, 0x0, 42},
},
{
name: "NoVersion NoReaders",
expectErr: require.NoError,
expectData: []byte{0x00, 0x0, 0x0, 0x0},
},
{
name: "NoReaders",
format: readers.SerializationFormat{
Version: 42,
},
expectErr: require.NoError,
expectData: []byte{0x00, 0x0, 0x0, 42},
},
{
name: "SingleReader",
format: readers.SerializationFormat{
Version: 42,
},
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
expectErr: require.NoError,
expectData: append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
},
{
name: "MultipleReaders",
format: readers.SerializationFormat{
Version: 42,
},
inputReaders: []io.ReadCloser{
io.NopCloser(bytes.NewReader(baseData)),
io.NopCloser(bytes.NewReader(baseData)),
},
expectErr: require.NoError,
expectData: append(
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
baseData...),
},
// Uncomment if we expand the version to 32 bits.
//{
// name: "VersionWithHighBitSet NoReaders Errors",
// format: readers.SerializationFormat{
// Version: 0x80000000,
// },
// expectErr: require.Error,
//},
{
name: "DeletedInFlight SingleReader Errors",
format: readers.SerializationFormat{
DelInFlight: true,
},
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
expectErr: require.Error,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
r, err := readers.NewVersionedBackupReader(
test.format,
test.inputReaders...)
test.expectErr(t, err, "getting backup reader: %v", clues.ToCore(err))
if err != nil {
return
}
defer func() {
err := r.Close()
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
}()
buf, err := io.ReadAll(r)
require.NoError(
t,
err,
"reading serialized data: %v",
clues.ToCore(err))
// Need to use equal because output is order-sensitive.
assert.Equal(t, test.expectData, buf, "serialized data")
})
}
}
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader_ShortReads() {
t := suite.T()
baseData := []byte("hello world")
expectData := append(
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
baseData...)
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)),
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "getting backup reader: %v", clues.ToCore(err))
defer func() {
err := r.Close()
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
}()
buf := make([]byte, len(expectData))
r = &shortReader{
maxReadLen: 3,
ReadCloser: r,
}
for read := 0; ; {
n, err := r.Read(buf[read:])
read += n
if read >= len(buf) {
break
}
require.NoError(t, err, "reading data: %v", clues.ToCore(err))
}
// Need to use equal because output is order-sensitive.
assert.Equal(t, expectData, buf, "serialized data")
}
// TestRestoreSerializationReader checks that we can read previously serialized
// data. For simplicity, it uses the versionedBackupReader to generate the
// input. This should be relatively safe because the tests for
// versionedBackupReader do compare directly against serialized data.
func (suite *SerializationReaderUnitSuite) TestRestoreSerializationReader() {
baseData := []byte("hello world")
table := []struct {
name string
inputReader func(*testing.T) io.ReadCloser
expectErr require.ErrorAssertionFunc
expectVersion readers.SerializationVersion
expectDelInFlight bool
expectData []byte
}{
{
name: "NoVersion NoReaders",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(readers.SerializationFormat{})
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectData: []byte{},
},
{
name: "DeletedInFlight NoReaders",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{
Version: 42,
DelInFlight: true,
})
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectDelInFlight: true,
expectData: []byte{},
},
{
name: "DeletedInFlight SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
// Need to specify the bytes manually because the backup reader won't
// allow creating something with the deleted flag and data.
return io.NopCloser(bytes.NewReader(append(
[]byte{0x80, 0x0, 0x0, 42},
baseData...)))
},
expectErr: require.NoError,
expectVersion: 42,
expectDelInFlight: true,
expectData: baseData,
},
{
name: "NoVersion SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{},
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectData: baseData,
},
{
name: "SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectData: baseData,
},
{
name: "ShortReads SingleReader",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
r = &shortReader{
maxReadLen: 3,
ReadCloser: r,
}
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectData: baseData,
},
{
name: "MultipleReaders",
inputReader: func(t *testing.T) io.ReadCloser {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: 42},
io.NopCloser(bytes.NewReader(baseData)),
io.NopCloser(bytes.NewReader(baseData)))
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
return r
},
expectErr: require.NoError,
expectVersion: 42,
expectData: append(slices.Clone(baseData), baseData...),
},
{
name: "EmptyReader Errors",
inputReader: func(t *testing.T) io.ReadCloser {
return io.NopCloser(bytes.NewReader([]byte{}))
},
expectErr: require.Error,
},
{
name: "TruncatedVersion Errors",
inputReader: func(t *testing.T) io.ReadCloser {
return io.NopCloser(bytes.NewReader([]byte{0x80, 0x0}))
},
expectErr: require.Error,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
r, err := readers.NewVersionedRestoreReader(test.inputReader(t))
test.expectErr(t, err, "getting restore reader: %v", clues.ToCore(err))
if err != nil {
return
}
defer func() {
err := r.Close()
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
}()
assert.Equal(
t,
test.expectVersion,
r.Format().Version,
"version")
assert.Equal(
t,
test.expectDelInFlight,
r.Format().DelInFlight,
"deleted in flight")
buf, err := io.ReadAll(r)
require.NoError(t, err, "reading serialized data: %v", clues.ToCore(err))
// Need to use equal because output is order-sensitive.
assert.Equal(t, test.expectData, buf, "serialized data")
})
}
}

View File

@ -1,30 +1,38 @@
package data package data
import ( import (
"bytes"
"context" "context"
"io" "io"
"sync"
"time" "time"
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/spatialcurrent/go-lazy/pkg/lazy" "github.com/spatialcurrent/go-lazy/pkg/lazy"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
) )
var ( var (
_ Item = &unindexedPrefetchedItem{}
_ ItemModTime = &unindexedPrefetchedItem{}
_ Item = &prefetchedItem{} _ Item = &prefetchedItem{}
_ ItemInfo = &prefetchedItem{} _ ItemInfo = &prefetchedItem{}
_ ItemModTime = &prefetchedItem{} _ ItemModTime = &prefetchedItem{}
_ Item = &unindexedLazyItem{}
_ ItemModTime = &unindexedLazyItem{}
_ Item = &lazyItem{} _ Item = &lazyItem{}
_ ItemInfo = &lazyItem{} _ ItemInfo = &lazyItem{}
_ ItemModTime = &lazyItem{} _ ItemModTime = &lazyItem{}
) )
func NewDeletedItem(itemID string) Item { func NewDeletedItem(itemID string) Item {
return &prefetchedItem{ return &unindexedPrefetchedItem{
id: itemID, id: itemID,
deleted: true, deleted: true,
// TODO(ashmrtn): This really doesn't need to be set since deleted items are // TODO(ashmrtn): This really doesn't need to be set since deleted items are
@ -34,24 +42,33 @@ func NewDeletedItem(itemID string) Item {
} }
} }
func NewPrefetchedItem( func NewUnindexedPrefetchedItem(
reader io.ReadCloser, reader io.ReadCloser,
itemID string, itemID string,
info details.ItemInfo, modTime time.Time,
) Item { ) (*unindexedPrefetchedItem, error) {
return &prefetchedItem{ r, err := readers.NewVersionedBackupReader(
id: itemID, readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
reader: reader, reader)
info: info, if err != nil {
modTime: info.Modified(), return nil, clues.Stack(err)
} }
return &unindexedPrefetchedItem{
id: itemID,
reader: r,
modTime: modTime,
}, nil
} }
// prefetchedItem represents a single item retrieved from the remote service. // unindexedPrefetchedItem represents a single item retrieved from the remote
type prefetchedItem struct { // service.
//
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedPrefetchedItem struct {
id string id string
reader io.ReadCloser reader io.ReadCloser
info details.ItemInfo
// modTime is the modified time of the item. It should match the modTime in // modTime is the modified time of the item. It should match the modTime in
// info if info is present. Here as a separate field so that deleted items // info if info is present. Here as a separate field so that deleted items
// don't error out by trying to source it from info. // don't error out by trying to source it from info.
@ -62,26 +79,51 @@ type prefetchedItem struct {
deleted bool deleted bool
} }
func (i prefetchedItem) ID() string { func (i unindexedPrefetchedItem) ID() string {
return i.id return i.id
} }
func (i *prefetchedItem) ToReader() io.ReadCloser { func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser {
return i.reader return i.reader
} }
func (i prefetchedItem) Deleted() bool { func (i unindexedPrefetchedItem) Deleted() bool {
return i.deleted return i.deleted
} }
func (i unindexedPrefetchedItem) ModTime() time.Time {
return i.modTime
}
func NewPrefetchedItem(
reader io.ReadCloser,
itemID string,
info details.ItemInfo,
) (*prefetchedItem, error) {
inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified())
if err != nil {
return nil, clues.Stack(err)
}
return &prefetchedItem{
unindexedPrefetchedItem: inner,
info: info,
}, nil
}
// prefetchedItem represents a single item retrieved from the remote service.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type prefetchedItem struct {
*unindexedPrefetchedItem
info details.ItemInfo
}
func (i prefetchedItem) Info() (details.ItemInfo, error) { func (i prefetchedItem) Info() (details.ItemInfo, error) {
return i.info, nil return i.info, nil
} }
func (i prefetchedItem) ModTime() time.Time {
return i.modTime
}
type ItemDataGetter interface { type ItemDataGetter interface {
GetData( GetData(
context.Context, context.Context,
@ -89,14 +131,14 @@ type ItemDataGetter interface {
) (io.ReadCloser, *details.ItemInfo, bool, error) ) (io.ReadCloser, *details.ItemInfo, bool, error)
} }
func NewLazyItem( func NewUnindexedLazyItem(
ctx context.Context, ctx context.Context,
itemGetter ItemDataGetter, itemGetter ItemDataGetter,
itemID string, itemID string,
modTime time.Time, modTime time.Time,
errs *fault.Bus, errs *fault.Bus,
) Item { ) *unindexedLazyItem {
return &lazyItem{ return &unindexedLazyItem{
ctx: ctx, ctx: ctx,
id: itemID, id: itemID,
itemGetter: itemGetter, itemGetter: itemGetter,
@ -105,11 +147,15 @@ func NewLazyItem(
} }
} }
// lazyItem represents a single item retrieved from the remote service. It // unindexedLazyItem represents a single item retrieved from the remote service.
// lazily fetches the item's data when the first call to ToReader().Read() is // It lazily fetches the item's data when the first call to ToReader().Read() is
// made. // made.
type lazyItem struct { //
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedLazyItem struct {
ctx context.Context ctx context.Context
mu sync.Mutex
id string id string
errs *fault.Bus errs *fault.Bus
itemGetter ItemDataGetter itemGetter ItemDataGetter
@ -127,17 +173,27 @@ type lazyItem struct {
delInFlight bool delInFlight bool
} }
func (i lazyItem) ID() string { func (i *unindexedLazyItem) ID() string {
return i.id return i.id
} }
func (i *lazyItem) ToReader() io.ReadCloser { func (i *unindexedLazyItem) ToReader() io.ReadCloser {
return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
// Don't allow getting Item info while trying to initialize said info.
// GetData could be a long-running call, but in theory nothing should happen
// with the item until a reader is returned anyway.
i.mu.Lock()
defer i.mu.Unlock()
reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.errs) reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.errs)
if err != nil { if err != nil {
return nil, clues.Stack(err) return nil, clues.Stack(err)
} }
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
// If an item was deleted then return an empty file so we don't fail the // If an item was deleted then return an empty file so we don't fail the
// backup and return a sentinel error when asked for ItemInfo so we don't // backup and return a sentinel error when asked for ItemInfo so we don't
// display the item in the backup. // display the item in the backup.
@ -149,21 +205,59 @@ func (i *lazyItem) ToReader() io.ReadCloser {
logger.Ctx(i.ctx).Info("item not found") logger.Ctx(i.ctx).Info("item not found")
i.delInFlight = true i.delInFlight = true
format.DelInFlight = true
r, err := readers.NewVersionedBackupReader(format)
return io.NopCloser(bytes.NewReader([]byte{})), nil return r, clues.Stack(err).OrNil()
} }
i.info = info i.info = info
return reader, nil r, err := readers.NewVersionedBackupReader(format, reader)
return r, clues.Stack(err).OrNil()
}) })
} }
func (i lazyItem) Deleted() bool { func (i *unindexedLazyItem) Deleted() bool {
return false return false
} }
func (i lazyItem) Info() (details.ItemInfo, error) { func (i *unindexedLazyItem) ModTime() time.Time {
return i.modTime
}
func NewLazyItem(
ctx context.Context,
itemGetter ItemDataGetter,
itemID string,
modTime time.Time,
errs *fault.Bus,
) *lazyItem {
return &lazyItem{
unindexedLazyItem: NewUnindexedLazyItem(
ctx,
itemGetter,
itemID,
modTime,
errs),
}
}
// lazyItem represents a single item retrieved from the remote service. It
// lazily fetches the item's data when the first call to ToReader().Read() is
// made.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type lazyItem struct {
*unindexedLazyItem
}
func (i *lazyItem) Info() (details.ItemInfo, error) {
i.mu.Lock()
defer i.mu.Unlock()
if i.delInFlight { if i.delInFlight {
return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx) return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx)
} else if i.info == nil { } else if i.info == nil {
@ -173,7 +267,3 @@ func (i lazyItem) Info() (details.ItemInfo, error) {
return *i.info, nil return *i.info, nil
} }
func (i lazyItem) ModTime() time.Time {
return i.modTime
}
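
The read-side contract for items built by these constructors: every stream now leads with the serialization header, and only the indexed variants satisfy data.ItemInfo. A hedged consumer sketch (item is any data.Item produced above):

// consumeSketch reads one versioned item stream.
func consumeSketch(item data.Item) error {
	rr, err := readers.NewVersionedRestoreReader(item.ToReader())
	if err != nil {
		return err
	}
	defer rr.Close()

	if rr.Format().DelInFlight {
		// Header-only stream; an indexed item's Info() reports ErrNotFound.
		return nil
	}

	payload, err := io.ReadAll(rr)
	if err != nil {
		return err
	}

	_ = payload

	// The unindexed variants deliberately fail this assertion, keeping
	// metadata items out of backup details.
	if ii, ok := item.(data.ItemInfo); ok {
		if _, err := ii.Info(); err != nil {
			return err
		}
	}

	return nil
}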

View File

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
@ -49,6 +50,38 @@ func TestItemUnitSuite(t *testing.T) {
suite.Run(t, &ItemUnitSuite{Suite: tester.NewUnitSuite(t)}) suite.Run(t, &ItemUnitSuite{Suite: tester.NewUnitSuite(t)})
} }
func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
prefetch, err := data.NewUnindexedPrefetchedItem(
io.NopCloser(bytes.NewReader([]byte{})),
"foo",
time.Time{})
require.NoError(suite.T(), err, clues.ToCore(err))
var item data.Item = prefetch
_, ok := item.(data.ItemInfo)
assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()")
}
func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
lazy := data.NewUnindexedLazyItem(
ctx,
nil,
"foo",
time.Time{},
fault.New(true))
var item data.Item = lazy
_, ok := item.(data.ItemInfo)
assert.False(t, ok, "unindexedLazyItem implements Info()")
}
func (suite *ItemUnitSuite) TestDeletedItem() { func (suite *ItemUnitSuite) TestDeletedItem() {
var ( var (
t = suite.T() t = suite.T()
@ -115,18 +148,29 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
item := data.NewPrefetchedItem(test.reader, id, test.info) item, err := data.NewPrefetchedItem(test.reader, id, test.info)
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, id, item.ID(), "ID") assert.Equal(t, id, item.ID(), "ID")
assert.False(t, item.Deleted(), "deleted") assert.False(t, item.Deleted(), "deleted")
assert.Equal( assert.Equal(
t, t,
test.info.Modified(), test.info.Modified(),
item.(data.ItemModTime).ModTime(), item.ModTime(),
"mod time") "mod time")
readData, err := io.ReadAll(item.ToReader()) r, err := readers.NewVersionedRestoreReader(item.ToReader())
test.readErr(t, err, clues.ToCore(err), "read error") require.NoError(t, err, "version error: %v", clues.ToCore(err))
if err != nil {
return
}
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
test.readErr(t, err, "read error: %v", clues.ToCore(err))
assert.Equal(t, test.expectData, readData, "read data") assert.Equal(t, test.expectData, readData, "read data")
}) })
} }
@ -169,6 +213,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
table := []struct { table := []struct {
name string name string
mid *mockItemDataGetter mid *mockItemDataGetter
versionErr assert.ErrorAssertionFunc
readErr assert.ErrorAssertionFunc readErr assert.ErrorAssertionFunc
infoErr assert.ErrorAssertionFunc infoErr assert.ErrorAssertionFunc
expectData []byte expectData []byte
@ -180,6 +225,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
reader: io.NopCloser(bytes.NewReader([]byte{})), reader: io.NopCloser(bytes.NewReader([]byte{})),
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}}, info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
}, },
versionErr: assert.NoError,
readErr: assert.NoError, readErr: assert.NoError,
infoErr: assert.NoError, infoErr: assert.NoError,
expectData: []byte{}, expectData: []byte{},
@ -190,6 +236,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
reader: io.NopCloser(bytes.NewReader(baseData)), reader: io.NopCloser(bytes.NewReader(baseData)),
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}}, info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
}, },
versionErr: assert.NoError,
readErr: assert.NoError, readErr: assert.NoError,
infoErr: assert.NoError, infoErr: assert.NoError,
expectData: baseData, expectData: baseData,
@ -200,6 +247,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
reader: io.NopCloser(bytes.NewReader(baseData)), reader: io.NopCloser(bytes.NewReader(baseData)),
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}}, info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
}, },
versionErr: assert.NoError,
readErr: assert.NoError, readErr: assert.NoError,
infoErr: assert.NoError, infoErr: assert.NoError,
expectData: baseData, expectData: baseData,
@ -209,6 +257,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
mid: &mockItemDataGetter{ mid: &mockItemDataGetter{
err: assert.AnError, err: assert.AnError,
}, },
versionErr: assert.Error,
readErr: assert.Error, readErr: assert.Error,
infoErr: assert.Error, infoErr: assert.Error,
expectData: []byte{}, expectData: []byte{},
@ -224,6 +273,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
}, },
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}}, info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
}, },
versionErr: assert.NoError,
readErr: assert.Error, readErr: assert.Error,
infoErr: assert.NoError, infoErr: assert.NoError,
expectData: baseData[:5], expectData: baseData[:5],
@ -253,15 +303,25 @@ func (suite *ItemUnitSuite) TestLazyItem() {
assert.Equal( assert.Equal(
t, t,
now, now,
item.(data.ItemModTime).ModTime(), item.ModTime(),
"mod time") "mod time")
// Read data to execute lazy reader. // Read data to execute lazy reader.
readData, err := io.ReadAll(item.ToReader()) r, err := readers.NewVersionedRestoreReader(item.ToReader())
test.versionErr(t, err, "version error: %v", clues.ToCore(err))
if err != nil {
return
}
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
test.readErr(t, err, "read error: %v", clues.ToCore(err)) test.readErr(t, err, "read error: %v", clues.ToCore(err))
assert.Equal(t, test.expectData, readData, "read data") assert.Equal(t, test.expectData, readData, "read data")
_, err = item.(data.ItemInfo).Info() _, err = item.Info()
test.infoErr(t, err, "Info(): %v", clues.ToCore(err)) test.infoErr(t, err, "Info(): %v", clues.ToCore(err))
e := errs.Errors() e := errs.Errors()
@ -301,15 +361,21 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
assert.Equal( assert.Equal(
t, t,
now, now,
item.(data.ItemModTime).ModTime(), item.ModTime(),
"mod time") "mod time")
// Read data to execute lazy reader. // Read data to execute lazy reader.
readData, err := io.ReadAll(item.ToReader()) r, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, "version error: %v", clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.True(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
require.NoError(t, err, "read error: %v", clues.ToCore(err)) require.NoError(t, err, "read error: %v", clues.ToCore(err))
assert.Empty(t, readData, "read data") assert.Empty(t, readData, "read data")
_, err = item.(data.ItemInfo).Info() _, err = item.Info()
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error") assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
e := errs.Errors() e := errs.Errors()
@ -341,9 +407,9 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
assert.Equal( assert.Equal(
t, t,
now, now,
item.(data.ItemModTime).ModTime(), item.ModTime(),
"mod time") "mod time")
_, err := item.(data.ItemInfo).Info() _, err := item.Info()
assert.Error(t, err, "Info() error") assert.Error(t, err, "Info() error")
} }

View File

@ -3,8 +3,13 @@ package mock
import ( import (
"context" "context"
"io" "io"
"testing"
"time" "time"
"github.com/alcionai/clues"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
@ -163,3 +168,106 @@ func (rc RestoreCollection) FetchItemByName(
return res, nil return res, nil
} }
var (
_ data.BackupCollection = &versionedBackupCollection{}
_ data.RestoreCollection = &unversionedRestoreCollection{}
_ data.Item = &itemWrapper{}
)
type itemWrapper struct {
data.Item
reader io.ReadCloser
}
func (i *itemWrapper) ToReader() io.ReadCloser {
return i.reader
}
func NewUnversionedRestoreCollection(
t *testing.T,
col data.RestoreCollection,
) *unversionedRestoreCollection {
return &unversionedRestoreCollection{
RestoreCollection: col,
t: t,
}
}
// unversionedRestoreCollection strips out version format headers on all items.
//
// Wrap data.RestoreCollections in this type if you don't need access to the
// version format header during tests and you know the item readers can't return
// an error.
type unversionedRestoreCollection struct {
data.RestoreCollection
t *testing.T
}
func (c *unversionedRestoreCollection) Items(
ctx context.Context,
errs *fault.Bus,
) <-chan data.Item {
res := make(chan data.Item)
go func() {
defer close(res)
for item := range c.RestoreCollection.Items(ctx, errs) {
r, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(c.t, err, clues.ToCore(err))
res <- &itemWrapper{
Item: item,
reader: r,
}
}
}()
return res
}
func NewVersionedBackupCollection(
t *testing.T,
col data.BackupCollection,
) *versionedBackupCollection {
return &versionedBackupCollection{
BackupCollection: col,
t: t,
}
}
// versionedBackupCollection injects basic version information on all items.
//
// Wrap data.BackupCollections in this type if you don't need to explicitly set
// the version format header during tests, aren't trying to check reader error
// cases, and aren't populating backup details.
type versionedBackupCollection struct {
data.BackupCollection
t *testing.T
}
func (c *versionedBackupCollection) Items(
ctx context.Context,
errs *fault.Bus,
) <-chan data.Item {
res := make(chan data.Item)
go func() {
defer close(res)
for item := range c.BackupCollection.Items(ctx, errs) {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
},
item.ToReader())
require.NoError(c.t, err, clues.ToCore(err))
res <- &itemWrapper{
Item: item,
reader: r,
}
}
}()
return res
}

View File

@ -205,7 +205,7 @@ func (w *conn) commonConnect(
bst, bst,
password, password,
kopiaOpts); err != nil { kopiaOpts); err != nil {
return clues.Wrap(err, "connecting to repo").WithClues(ctx) return clues.Wrap(err, "connecting to kopia repo").WithClues(ctx)
} }
if err := w.open(ctx, cfgFile, password); err != nil { if err := w.open(ctx, cfgFile, password); err != nil {
@ -580,6 +580,10 @@ func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) {
} }
func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error { func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error {
if len(password) == 0 {
return clues.New("empty password provided")
}
kopiaRef := NewConn(w.storage) kopiaRef := NewConn(w.storage)
if err := kopiaRef.Connect(ctx, opts); err != nil { if err := kopiaRef.Connect(ctx, opts); err != nil {
return clues.Wrap(err, "connecting kopia client") return clues.Wrap(err, "connecting kopia client")
@ -587,8 +591,10 @@ func (w *conn) UpdatePassword(ctx context.Context, password string, opts reposit
defer kopiaRef.Close(ctx) defer kopiaRef.Close(ctx)
repository := kopiaRef.Repository.(repo.DirectRepository) kopiaRepo := kopiaRef.Repository.(repo.DirectRepository)
err := repository.FormatManager().ChangePassword(ctx, password) if err := kopiaRepo.FormatManager().ChangePassword(ctx, password); err != nil {
return clues.Wrap(err, "unable to update password")
}
return errors.Wrap(err, "unable to update password") return nil
} }
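
Callers now fail fast on an empty password instead of surfacing a kopia error from deeper in the stack. A minimal usage sketch (w is a *conn already carrying the repo's storage config):

// rotateSketch changes the repo password in place.
func rotateSketch(ctx context.Context, w *conn, newPassword string) error {
	if err := w.UpdatePassword(ctx, newPassword, repository.Options{}); err != nil {
		return clues.Wrap(err, "rotating repo password")
	}

	return nil
}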

View File

@ -22,6 +22,20 @@ import (
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
) )
func openLocalKopiaRepo(
t tester.TestT,
ctx context.Context, //revive:disable-line:context-as-argument
) (*conn, error) {
st := storeTD.NewFilesystemStorage(t)
k := NewConn(st)
if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil {
return nil, err
}
return k, nil
}
func openKopiaRepo( func openKopiaRepo(
t tester.TestT, t tester.TestT,
ctx context.Context, //revive:disable-line:context-as-argument ctx context.Context, //revive:disable-line:context-as-argument
@ -81,7 +95,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewFilesystemStorage(t)
k := NewConn(st) k := NewConn(st)
err := k.Initialize(ctx, repository.Options{}, repository.Retention{}) err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
@ -101,7 +115,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewFilesystemStorage(t)
st.Provider = storage.ProviderUnknown st.Provider = storage.ProviderUnknown
k := NewConn(st) k := NewConn(st)
@ -115,7 +129,7 @@ func (suite *WrapperIntegrationSuite) TestConnectWithoutInitErrors() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewFilesystemStorage(t)
k := NewConn(st) k := NewConn(st)
err := k.Connect(ctx, repository.Options{}) err := k.Connect(ctx, repository.Options{})
@ -408,7 +422,7 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
Host: "bar", Host: "bar",
} }
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewFilesystemStorage(t)
k := NewConn(st) k := NewConn(st)
err := k.Initialize(ctx, opts, repository.Retention{}) err := k.Initialize(ctx, opts, repository.Retention{})

View File

@ -7,6 +7,7 @@ import (
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/kopia/kopia/fs" "github.com/kopia/kopia/fs"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
@ -16,6 +17,7 @@ import (
var ( var (
_ data.RestoreCollection = &kopiaDataCollection{} _ data.RestoreCollection = &kopiaDataCollection{}
_ data.Item = &kopiaDataStream{} _ data.Item = &kopiaDataStream{}
_ data.ItemSize = &kopiaDataStream{}
) )
type kopiaDataCollection struct { type kopiaDataCollection struct {
@ -23,7 +25,7 @@ type kopiaDataCollection struct {
dir fs.Directory dir fs.Directory
items []string items []string
counter ByteCounter counter ByteCounter
expectedVersion uint32 expectedVersion readers.SerializationVersion
} }
func (kdc *kopiaDataCollection) Items( func (kdc *kopiaDataCollection) Items(
@ -102,7 +104,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
return nil, clues.New("object is not a file").WithClues(ctx) return nil, clues.New("object is not a file").WithClues(ctx)
} }
size := f.Size() - int64(versionSize) size := f.Size() - int64(readers.VersionFormatSize)
if size < 0 { if size < 0 {
logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size) logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size)
@ -118,13 +120,32 @@ func (kdc kopiaDataCollection) FetchItemByName(
return nil, clues.Wrap(err, "opening file").WithClues(ctx) return nil, clues.Wrap(err, "opening file").WithClues(ctx)
} }
// TODO(ashmrtn): Remove this when individual services implement checks for
// version and deleted items.
rr, err := readers.NewVersionedRestoreReader(r)
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
}
if rr.Format().Version != kdc.expectedVersion {
return nil, clues.New("unexpected data format").
WithClues(ctx).
With(
"read_version", rr.Format().Version,
"expected_version", kdc.expectedVersion)
}
// This is a conservative check, but we shouldn't be seeing items that were
// deleted in flight during restores because there's no way to select them.
if rr.Format().DelInFlight {
return nil, clues.New("selected item marked as deleted in flight").
WithClues(ctx)
}
return &kopiaDataStream{ return &kopiaDataStream{
id: name, id: name,
reader: &restoreStreamReader{ reader: rr,
ReadCloser: r, size: size,
expectedVersion: kdc.expectedVersion,
},
size: size,
}, nil }, nil
} }
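
With the gate moved into FetchItemByName, a version mismatch or deleted-in-flight item now fails at lookup instead of at first read, and the returned stream starts at the payload. The caller's view, sketched against the data.RestoreCollection interface ("item-name" is illustrative):

// fetchSketch fetches one item; version checks have already run.
func fetchSketch(ctx context.Context, col data.RestoreCollection) error {
	item, err := col.FetchItemByName(ctx, "item-name")
	if err != nil {
		// Covers not-found, unexpected version, and deleted-in-flight.
		return err
	}

	buf, err := io.ReadAll(item.ToReader()) // payload only; header consumed
	if err != nil {
		return err
	}

	_ = buf

	return nil
}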

View File

@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock" dataMock "github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
@ -121,25 +122,35 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
) )
// Needs to be a function so the readers get refreshed each time. // Needs to be a function so the readers get refreshed each time.
getLayout := func() fs.Directory { getLayout := func(t *testing.T) fs.Directory {
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(files[0].data)))
require.NoError(t, err, clues.ToCore(err))
r2, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(files[1].data)))
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{ return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(files[0].uuid), encodeAsPath(files[0].uuid),
nil), nil),
r: newBackupStreamReader( r: r1,
serializationVersion, size: int64(len(files[0].data) + readers.VersionFormatSize),
io.NopCloser(bytes.NewReader(files[0].data))),
size: int64(len(files[0].data) + versionSize),
}, },
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(files[1].uuid), encodeAsPath(files[1].uuid),
nil), nil),
r: newBackupStreamReader( r: r2,
serializationVersion, size: int64(len(files[1].data) + readers.VersionFormatSize),
io.NopCloser(bytes.NewReader(files[1].data))),
size: int64(len(files[1].data) + versionSize),
}, },
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
@ -224,10 +235,10 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
} }
c := kopiaDataCollection{ c := kopiaDataCollection{
dir: getLayout(), dir: getLayout(t),
path: nil, path: nil,
items: items, items: items,
expectedVersion: serializationVersion, expectedVersion: readers.DefaultSerializationVersion,
} }
var ( var (
@ -291,23 +302,34 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
// Needs to be a function so we can switch the serialization version as // Needs to be a function so we can switch the serialization version as
// needed. // needed.
getLayout := func(serVersion uint32) fs.Directory { getLayout := func(
t *testing.T,
serVersion readers.SerializationVersion,
) fs.Directory {
format := readers.SerializationFormat{Version: serVersion}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader([]byte(noErrFileData))))
require.NoError(t, err, clues.ToCore(err))
r2, err := readers.NewVersionedBackupReader(
format,
errReader.ToReader())
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{ return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(noErrFileName), encodeAsPath(noErrFileName),
nil), nil),
r: newBackupStreamReader( r: r1,
serVersion,
io.NopCloser(bytes.NewReader([]byte(noErrFileData)))),
}, },
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(errFileName), encodeAsPath(errFileName),
nil), nil),
r: newBackupStreamReader( r: r2,
serVersion,
errReader.ToReader()),
}, },
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
@ -330,7 +352,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
table := []struct { table := []struct {
name string name string
inputName string inputName string
inputSerializationVersion uint32 inputSerializationVersion readers.SerializationVersion
expectedData []byte expectedData []byte
lookupErr assert.ErrorAssertionFunc lookupErr assert.ErrorAssertionFunc
readErr assert.ErrorAssertionFunc readErr assert.ErrorAssertionFunc
@ -339,7 +361,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
{ {
name: "FileFound_NoError", name: "FileFound_NoError",
inputName: noErrFileName, inputName: noErrFileName,
inputSerializationVersion: serializationVersion, inputSerializationVersion: readers.DefaultSerializationVersion,
expectedData: []byte(noErrFileData), expectedData: []byte(noErrFileData),
lookupErr: assert.NoError, lookupErr: assert.NoError,
readErr: assert.NoError, readErr: assert.NoError,
@ -347,21 +369,20 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
{ {
name: "FileFound_ReadError", name: "FileFound_ReadError",
inputName: errFileName, inputName: errFileName,
inputSerializationVersion: serializationVersion, inputSerializationVersion: readers.DefaultSerializationVersion,
lookupErr: assert.NoError, lookupErr: assert.NoError,
readErr: assert.Error, readErr: assert.Error,
}, },
{ {
name: "FileFound_VersionError", name: "FileFound_VersionError",
inputName: noErrFileName, inputName: noErrFileName,
inputSerializationVersion: serializationVersion + 1, inputSerializationVersion: readers.DefaultSerializationVersion + 1,
lookupErr: assert.NoError, lookupErr: assert.Error,
readErr: assert.Error,
}, },
{ {
name: "FileNotFound", name: "FileNotFound",
inputName: "foo", inputName: "foo",
inputSerializationVersion: serializationVersion + 1, inputSerializationVersion: readers.DefaultSerializationVersion + 1,
lookupErr: assert.Error, lookupErr: assert.Error,
notFoundErr: true, notFoundErr: true,
}, },
@ -373,14 +394,14 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
root := getLayout(test.inputSerializationVersion) root := getLayout(t, test.inputSerializationVersion)
c := &i64counter{} c := &i64counter{}
col := &kopiaDataCollection{ col := &kopiaDataCollection{
path: pth, path: pth,
dir: root, dir: root,
counter: c, counter: c,
expectedVersion: serializationVersion, expectedVersion: readers.DefaultSerializationVersion,
} }
s, err := col.FetchItemByName(ctx, test.inputName) s, err := col.FetchItemByName(ctx, test.inputName)

View File

@ -16,12 +16,11 @@ func filesystemStorage(
repoOpts repository.Options, repoOpts repository.Options,
s storage.Storage, s storage.Storage,
) (blob.Storage, error) { ) (blob.Storage, error) {
cfg, err := s.StorageConfig() fsCfg, err := s.ToFilesystemConfig()
if err != nil { if err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.Stack(err).WithClues(ctx)
} }
fsCfg := cfg.(*storage.FilesystemConfig)
opts := filesystem.Options{ opts := filesystem.Options{
Path: fsCfg.Path, Path: fsCfg.Path,
} }

View File

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/service/exchange/mock" "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
@ -150,20 +151,27 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
require.NoError(suite.T(), err, clues.ToCore(err)) require.NoError(suite.T(), err, clues.ToCore(err))
// Needs to be a function so the readers get refreshed each time. // Needs to be a function so the readers get refreshed each time.
layouts := []func() fs.Directory{ layouts := []func(t *testing.T) fs.Directory{
// Has the following: // Has the following:
// - file1: data[0] // - file1: data[0]
// - errOpen: (error opening file) // - errOpen: (error opening file)
func() fs.Directory { func(t *testing.T) fs.Directory {
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData1)))
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{ return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileName1), encodeAsPath(fileName1),
nil), nil),
r: newBackupStreamReader( r: r1,
serializationVersion, size: int64(len(fileData1) + readers.VersionFormatSize),
io.NopCloser(bytes.NewReader(fileData1))),
size: int64(len(fileData1) + versionSize),
}, },
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
@ -178,34 +186,47 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
// - file1: data[1] // - file1: data[1]
// - file2: data[0] // - file2: data[0]
// - errOpen: data[2] // - errOpen: data[2]
func() fs.Directory { func(t *testing.T) fs.Directory {
format := readers.SerializationFormat{
Version: readers.DefaultSerializationVersion,
}
r1, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData2)))
require.NoError(t, err, clues.ToCore(err))
r2, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData1)))
require.NoError(t, err, clues.ToCore(err))
r3, err := readers.NewVersionedBackupReader(
format,
io.NopCloser(bytes.NewReader(fileData3)))
require.NoError(t, err, clues.ToCore(err))
return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{ return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileName1), encodeAsPath(fileName1),
nil), nil),
r: newBackupStreamReader( r: r1,
serializationVersion, size: int64(len(fileData2) + readers.VersionFormatSize),
io.NopCloser(bytes.NewReader(fileData2))),
size: int64(len(fileData2) + versionSize),
}, },
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileName2), encodeAsPath(fileName2),
nil), nil),
r: newBackupStreamReader( r: r2,
serializationVersion, size: int64(len(fileData1) + readers.VersionFormatSize),
io.NopCloser(bytes.NewReader(fileData1))),
size: int64(len(fileData1) + versionSize),
}, },
&mockFile{ &mockFile{
StreamingFile: virtualfs.StreamingFileFromReader( StreamingFile: virtualfs.StreamingFileFromReader(
encodeAsPath(fileOpenErrName), encodeAsPath(fileOpenErrName),
nil), nil),
r: newBackupStreamReader( r: r3,
serializationVersion, size: int64(len(fileData3) + readers.VersionFormatSize),
io.NopCloser(bytes.NewReader(fileData3))),
size: int64(len(fileData3) + versionSize),
}, },
}) })
}, },
@ -257,9 +278,9 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
for i, layout := range layouts { for i, layout := range layouts {
col := &kopiaDataCollection{ col := &kopiaDataCollection{
path: pth, path: pth,
dir: layout(), dir: layout(t),
counter: c, counter: c,
expectedVersion: serializationVersion, expectedVersion: readers.DefaultSerializationVersion,
} }
err := dc.addCollection(colPaths[i], col) err := dc.addCollection(colPaths[i], col)

View File

@ -29,7 +29,7 @@ type fooModel struct {
//revive:disable-next-line:context-as-argument //revive:disable-next-line:context-as-argument
func getModelStore(t *testing.T, ctx context.Context) *ModelStore { func getModelStore(t *testing.T, ctx context.Context) *ModelStore {
c, err := openKopiaRepo(t, ctx) c, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
return &ModelStore{c: c, modelVersion: globalModelVersion} return &ModelStore{c: c, modelVersion: globalModelVersion}
@ -856,7 +856,7 @@ func openConnAndModelStore(
t *testing.T, t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument ctx context.Context, //revive:disable-line:context-as-argument
) (*conn, *ModelStore) { ) (*conn, *ModelStore) {
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewFilesystemStorage(t)
c := NewConn(st) c := NewConn(st)
err := c.Initialize(ctx, repository.Options{}, repository.Retention{}) err := c.Initialize(ctx, repository.Options{}, repository.Retention{})

View File

@ -20,13 +20,11 @@ func s3BlobStorage(
repoOpts repository.Options, repoOpts repository.Options,
s storage.Storage, s storage.Storage,
) (blob.Storage, error) { ) (blob.Storage, error) {
sc, err := s.StorageConfig() cfg, err := s.ToS3Config()
if err != nil { if err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.Stack(err).WithClues(ctx)
} }
cfg := sc.(*storage.S3Config)
endpoint := defaultS3Endpoint endpoint := defaultS3Endpoint
if len(cfg.Endpoint) > 0 { if len(cfg.Endpoint) > 0 {
endpoint = cfg.Endpoint endpoint = cfg.Endpoint

View File

@ -1,19 +1,14 @@
package kopia package kopia
import ( import (
"bytes"
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/binary"
"errors" "errors"
"io"
"os"
"runtime/trace" "runtime/trace"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"unsafe"
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/kopia/kopia/fs" "github.com/kopia/kopia/fs"
@ -37,101 +32,6 @@ import (
const maxInflateTraversalDepth = 500 const maxInflateTraversalDepth = 500
var versionSize = int(unsafe.Sizeof(serializationVersion))
func newBackupStreamReader(version uint32, reader io.ReadCloser) *backupStreamReader {
buf := make([]byte, versionSize)
binary.BigEndian.PutUint32(buf, version)
bufReader := io.NopCloser(bytes.NewReader(buf))
return &backupStreamReader{
readers: []io.ReadCloser{bufReader, reader},
combined: io.NopCloser(io.MultiReader(bufReader, reader)),
}
}
// backupStreamReader is a wrapper around the io.Reader that other Corso
// components return when backing up information. It injects a version number at
// the start of the data stream. Future versions of Corso may not need this if
// they use more complex serialization logic as serialization/version injection
// will be handled by other components.
type backupStreamReader struct {
readers []io.ReadCloser
combined io.ReadCloser
}
func (rw *backupStreamReader) Read(p []byte) (n int, err error) {
if rw.combined == nil {
return 0, os.ErrClosed
}
return rw.combined.Read(p)
}
func (rw *backupStreamReader) Close() error {
if rw.combined == nil {
return nil
}
rw.combined = nil
var errs *clues.Err
for _, r := range rw.readers {
err := r.Close()
if err != nil {
errs = clues.Stack(clues.Wrap(err, "closing reader"), errs)
}
}
return errs.OrNil()
}
// restoreStreamReader is a wrapper around the io.Reader that kopia returns when
// reading data from an item. It examines and strips off the version number of
// the restored data. Future versions of Corso may not need this if they use
// more complex serialization logic as version checking/deserialization will be
// handled by other components. A reader that returns a version error is no
// longer valid and should not be used once the version error is returned.
type restoreStreamReader struct {
io.ReadCloser
expectedVersion uint32
readVersion bool
}
func (rw *restoreStreamReader) checkVersion() error {
versionBuf := make([]byte, versionSize)
for newlyRead := 0; newlyRead < versionSize; {
n, err := rw.ReadCloser.Read(versionBuf[newlyRead:])
if err != nil {
return clues.Wrap(err, "reading data format version")
}
newlyRead += n
}
version := binary.BigEndian.Uint32(versionBuf)
if version != rw.expectedVersion {
return clues.New("unexpected data format").With("read_version", version)
}
return nil
}
func (rw *restoreStreamReader) Read(p []byte) (n int, err error) {
if !rw.readVersion {
rw.readVersion = true
if err := rw.checkVersion(); err != nil {
return 0, err
}
}
return rw.ReadCloser.Read(p)
}
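
The two readers removed above implemented the whole serialization-versioning scheme by hand: backup prepends a big-endian uint32 version header to the payload, and restore strips and validates that header before exposing the rest of the stream. That job now belongs to internal/common/readers (NewVersionedBackupReader and NewVersionedRestoreReader, exercised throughout the updated tests). As a reference, here is a minimal self-contained Go sketch of the removed mechanism; the names are illustrative and are not part of the readers package.

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

const versionSize = 4 // length of a big-endian uint32 header

// prependVersion mirrors newBackupStreamReader: the version header is
// concatenated ahead of the payload with io.MultiReader.
func prependVersion(version uint32, payload io.Reader) io.Reader {
	buf := make([]byte, versionSize)
	binary.BigEndian.PutUint32(buf, version)

	return io.MultiReader(bytes.NewReader(buf), payload)
}

// stripVersion mirrors restoreStreamReader.checkVersion: consume the header,
// validate it, and hand back the remainder of the stream. io.ReadFull loops
// over short reads the same way the removed manual loop did.
func stripVersion(expected uint32, r io.Reader) (io.Reader, error) {
	buf := make([]byte, versionSize)

	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, fmt.Errorf("reading data format version: %w", err)
	}

	if got := binary.BigEndian.Uint32(buf); got != expected {
		return nil, errors.New("unexpected data format")
	}

	return r, nil
}

func main() {
	versioned := prependVersion(1, bytes.NewReader([]byte("payload")))

	r, err := stripVersion(1, versioned)
	if err != nil {
		panic(err)
	}

	rest, _ := io.ReadAll(r)
	fmt.Println(string(rest)) // prints: payload
}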
type itemDetails struct { type itemDetails struct {
infoer data.ItemInfo infoer data.ItemInfo
repoPath path.Path repoPath path.Path
@ -436,7 +336,7 @@ func collectionEntries(
entry := virtualfs.StreamingFileWithModTimeFromReader( entry := virtualfs.StreamingFileWithModTimeFromReader(
encodedName, encodedName,
modTime, modTime,
newBackupStreamReader(serializationVersion, e.ToReader())) e.ToReader())
err = ctr(ctx, entry) err = ctr(ctx, entry)
if err != nil { if err != nil {

View File

@ -14,7 +14,6 @@ import (
"github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/snapshotfs" "github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
@ -124,12 +123,6 @@ func expectFileData(
return return
} }
// Need to wrap with a restore stream reader to remove the version.
r = &restoreStreamReader{
ReadCloser: io.NopCloser(r),
expectedVersion: serializationVersion,
}
got, err := io.ReadAll(r) got, err := io.ReadAll(r)
if !assert.NoError(t, err, "reading data in file", name, clues.ToCore(err)) { if !assert.NoError(t, err, "reading data in file", name, clues.ToCore(err)) {
return return
@ -226,135 +219,6 @@ func getDirEntriesForEntry(
// --------------- // ---------------
// unit tests // unit tests
// --------------- // ---------------
type limitedRangeReader struct {
readLen int
io.ReadCloser
}
func (lrr *limitedRangeReader) Read(p []byte) (int, error) {
if len(p) == 0 {
// Not well specified behavior, defer to underlying reader.
return lrr.ReadCloser.Read(p)
}
toRead := lrr.readLen
if len(p) < toRead {
toRead = len(p)
}
return lrr.ReadCloser.Read(p[:toRead])
}
type VersionReadersUnitSuite struct {
tester.Suite
}
func TestVersionReadersUnitSuite(t *testing.T) {
suite.Run(t, &VersionReadersUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *VersionReadersUnitSuite) TestWriteAndRead() {
inputData := []byte("This is some data for the reader to test with")
table := []struct {
name string
readVersion uint32
writeVersion uint32
check assert.ErrorAssertionFunc
}{
{
name: "SameVersionSucceeds",
readVersion: 42,
writeVersion: 42,
check: assert.NoError,
},
{
name: "DifferentVersionsFail",
readVersion: 7,
writeVersion: 42,
check: assert.Error,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
baseReader := bytes.NewReader(inputData)
reversible := &restoreStreamReader{
expectedVersion: test.readVersion,
ReadCloser: newBackupStreamReader(
test.writeVersion,
io.NopCloser(baseReader)),
}
defer reversible.Close()
allData, err := io.ReadAll(reversible)
test.check(t, err, clues.ToCore(err))
if err != nil {
return
}
assert.Equal(t, inputData, allData)
})
}
}
func readAllInParts(
t *testing.T,
partLen int,
reader io.ReadCloser,
) ([]byte, int) {
res := []byte{}
read := 0
tmp := make([]byte, partLen)
for {
n, err := reader.Read(tmp)
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err, clues.ToCore(err))
read += n
res = append(res, tmp[:n]...)
}
return res, read
}
func (suite *VersionReadersUnitSuite) TestWriteHandlesShortReads() {
t := suite.T()
inputData := []byte("This is some data for the reader to test with")
version := uint32(42)
baseReader := bytes.NewReader(inputData)
versioner := newBackupStreamReader(version, io.NopCloser(baseReader))
expectedToWrite := len(inputData) + int(versionSize)
// "Write" all the data.
versionedData, writtenLen := readAllInParts(t, 1, versioner)
assert.Equal(t, expectedToWrite, writtenLen)
// Read all of the data back.
baseReader = bytes.NewReader(versionedData)
reader := &restoreStreamReader{
expectedVersion: version,
// Be adversarial and only allow reads of length 1 from the byte reader.
ReadCloser: &limitedRangeReader{
readLen: 1,
ReadCloser: io.NopCloser(baseReader),
},
}
readData, readLen := readAllInParts(t, 1, reader)
// This reports the bytes read and returned to the user, excluding the version
// that is stripped off at the start.
assert.Equal(t, len(inputData), readLen)
assert.Equal(t, inputData, readData)
}
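
The deleted suite also checked that the header parses correctly when the source trickles data one byte per Read. The sketch below reproduces that scenario with the prependVersion/stripVersion helpers from the previous sketch; shortReader is an illustrative stand-in for the removed limitedRangeReader.

// shortReader caps every Read at readLen bytes to simulate a slow source.
type shortReader struct {
	readLen int
	io.Reader
}

func (s *shortReader) Read(p []byte) (int, error) {
	if len(p) == 0 {
		// Behavior for empty buffers isn't well specified; defer to the source.
		return s.Reader.Read(p)
	}

	if len(p) > s.readLen {
		p = p[:s.readLen]
	}

	return s.Reader.Read(p)
}

// Even when the source yields one byte at a time, the version header must
// still be read completely and validated.
func roundTripOneByteAtATime() error {
	payload := []byte("some payload")
	versioned := prependVersion(1, bytes.NewReader(payload))

	r, err := stripVersion(1, &shortReader{readLen: 1, Reader: versioned})
	if err != nil {
		return err
	}

	got, err := io.ReadAll(r)
	if err != nil {
		return err
	}

	if !bytes.Equal(got, payload) {
		return errors.New("payload mismatch")
	}

	return nil
}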
type CorsoProgressUnitSuite struct { type CorsoProgressUnitSuite struct {
tester.Suite tester.Suite
targetFilePath path.Path targetFilePath path.Path
@ -2420,9 +2284,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
encodeElements(inboxFileName1)[0], encodeElements(inboxFileName1)[0],
time.Time{}, time.Time{},
// Wrap with a backup reader so it gets the version injected. // Wrap with a backup reader so it gets the version injected.
newBackupStreamReader( io.NopCloser(bytes.NewReader(inboxFileData1v2))),
serializationVersion,
io.NopCloser(bytes.NewReader(inboxFileData1v2)))),
}), }),
}), }),
virtualfs.NewStaticDirectory( virtualfs.NewStaticDirectory(
@ -2582,9 +2444,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(inboxFileName1)[0], encodeElements(inboxFileName1)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(inboxFileData1))),
serializationVersion,
io.NopCloser(bytes.NewReader(inboxFileData1)))),
}), }),
}), }),
virtualfs.NewStaticDirectory( virtualfs.NewStaticDirectory(
@ -2596,9 +2456,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(contactsFileName1)[0], encodeElements(contactsFileName1)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(contactsFileData1))),
serializationVersion,
io.NopCloser(bytes.NewReader(contactsFileData1)))),
}), }),
}), }),
}) })
@ -2817,15 +2675,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName5)[0], encodeElements(fileName5)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData5))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData5)))),
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName6)[0], encodeElements(fileName6)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData6))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData6)))),
}) })
counters[folderID3] = count counters[folderID3] = count
@ -2835,15 +2689,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName3)[0], encodeElements(fileName3)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData3))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData3)))),
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName4)[0], encodeElements(fileName4)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData4))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData4)))),
folder, folder,
}) })
counters[folderID2] = count counters[folderID2] = count
@ -2859,15 +2709,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName1)[0], encodeElements(fileName1)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData1))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData1)))),
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName2)[0], encodeElements(fileName2)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData2))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData2)))),
folder, folder,
folder4, folder4,
}) })
@ -2879,15 +2725,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName7)[0], encodeElements(fileName7)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData7))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData7)))),
virtualfs.StreamingFileWithModTimeFromReader( virtualfs.StreamingFileWithModTimeFromReader(
encodeElements(fileName8)[0], encodeElements(fileName8)[0],
time.Time{}, time.Time{},
newBackupStreamReader( io.NopCloser(bytes.NewReader(fileData8))),
serializationVersion,
io.NopCloser(bytes.NewReader(fileData8)))),
}) })
counters[folderID5] = count counters[folderID5] = count

View File

@ -18,6 +18,7 @@ import (
"golang.org/x/exp/maps" "golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/common/prefixmatcher"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/diagnostics"
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/observe"
@ -36,8 +37,6 @@ const (
// possibly corresponding to who is making the backup. // possibly corresponding to who is making the backup.
corsoHost = "corso-host" corsoHost = "corso-host"
corsoUser = "corso" corsoUser = "corso"
serializationVersion uint32 = 1
) )
// common manifest tags // common manifest tags
@ -447,7 +446,7 @@ func loadDirsAndItems(
dir: dir, dir: dir,
items: dirItems.items, items: dirItems.items,
counter: bcounter, counter: bcounter,
expectedVersion: serializationVersion, expectedVersion: readers.DefaultSerializationVersion,
} }
if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil { if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {

View File

@ -184,7 +184,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_FirstRun_NoChanges() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
k, err := openKopiaRepo(t, ctx) k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
w := &Wrapper{k} w := &Wrapper{k}
@ -204,7 +204,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_NoForce_Fails
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
k, err := openKopiaRepo(t, ctx) k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
w := &Wrapper{k} w := &Wrapper{k}
@ -241,7 +241,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_Force_Succeed
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
k, err := openKopiaRepo(t, ctx) k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
w := &Wrapper{k} w := &Wrapper{k}
@ -754,7 +754,7 @@ func (suite *KopiaIntegrationSuite) SetupTest() {
t := suite.T() t := suite.T()
suite.ctx, suite.flush = tester.NewContext(t) suite.ctx, suite.flush = tester.NewContext(t)
c, err := openKopiaRepo(t, suite.ctx) c, err := openLocalKopiaRepo(t, suite.ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
suite.w = &Wrapper{c} suite.w = &Wrapper{c}
@ -1245,7 +1245,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
k, err := openKopiaRepo(t, ctx) k, err := openLocalKopiaRepo(t, ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
err = k.Compression(ctx, "s2-default") err = k.Compression(ctx, "s2-default")
@ -1268,7 +1268,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
ctx, ctx,
[]identity.Reasoner{r}, []identity.Reasoner{r},
nil, nil,
[]data.BackupCollection{dc1, dc2}, []data.BackupCollection{
dataMock.NewVersionedBackupCollection(t, dc1),
dataMock.NewVersionedBackupCollection(t, dc2),
},
nil, nil,
nil, nil,
true, true,
@ -1556,7 +1559,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
//nolint:forbidigo //nolint:forbidigo
suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls) suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls)
c, err := openKopiaRepo(t, suite.ctx) c, err := openLocalKopiaRepo(t, suite.ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
suite.w = &Wrapper{c} suite.w = &Wrapper{c}
@ -1577,12 +1580,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
}) })
} }
collections = append(collections, collection) collections = append(
collections,
dataMock.NewVersionedBackupCollection(t, collection))
} }
r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
stats, deets, _, err := suite.w.ConsumeBackupCollections( // Other tests check basic things about deets, so we don't repeat that here.
stats, _, _, err := suite.w.ConsumeBackupCollections(
suite.ctx, suite.ctx,
[]identity.Reasoner{r}, []identity.Reasoner{r},
nil, nil,
@ -1597,8 +1603,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
require.Equal(t, stats.TotalDirectoryCount, expectedDirs) require.Equal(t, stats.TotalDirectoryCount, expectedDirs)
require.Equal(t, stats.IgnoredErrorCount, 0) require.Equal(t, stats.IgnoredErrorCount, 0)
require.False(t, stats.Incomplete) require.False(t, stats.Incomplete)
// 6 file and 2 folder entries.
assert.Len(t, deets.Details().Entries, expectedFiles+2)
suite.snapshotID = manifest.ID(stats.SnapshotID) suite.snapshotID = manifest.ID(stats.SnapshotID)
} }
@ -1629,7 +1633,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludePrefix bool excludePrefix bool
expectedCachedItems int expectedCachedItems int
expectedUncachedItems int expectedUncachedItems int
cols func() []data.BackupCollection cols func(t *testing.T) []data.BackupCollection
backupIDCheck require.ValueAssertionFunc backupIDCheck require.ValueAssertionFunc
restoreCheck assert.ErrorAssertionFunc restoreCheck assert.ErrorAssertionFunc
}{ }{
@ -1638,7 +1642,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludeItem: true, excludeItem: true,
expectedCachedItems: len(suite.filesByPath) - 1, expectedCachedItems: len(suite.filesByPath) - 1,
expectedUncachedItems: 0, expectedUncachedItems: 0,
cols: func() []data.BackupCollection { cols: func(t *testing.T) []data.BackupCollection {
return nil return nil
}, },
backupIDCheck: require.NotEmpty, backupIDCheck: require.NotEmpty,
@ -1650,7 +1654,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludePrefix: true, excludePrefix: true,
expectedCachedItems: len(suite.filesByPath) - 1, expectedCachedItems: len(suite.filesByPath) - 1,
expectedUncachedItems: 0, expectedUncachedItems: 0,
cols: func() []data.BackupCollection { cols: func(t *testing.T) []data.BackupCollection {
return nil return nil
}, },
backupIDCheck: require.NotEmpty, backupIDCheck: require.NotEmpty,
@ -1661,7 +1665,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
// No snapshot should be made since there were no changes. // No snapshot should be made since there were no changes.
expectedCachedItems: 0, expectedCachedItems: 0,
expectedUncachedItems: 0, expectedUncachedItems: 0,
cols: func() []data.BackupCollection { cols: func(t *testing.T) []data.BackupCollection {
return nil return nil
}, },
// Backup doesn't run. // Backup doesn't run.
@ -1671,7 +1675,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
name: "NoExcludeItemWithChanges", name: "NoExcludeItemWithChanges",
expectedCachedItems: len(suite.filesByPath), expectedCachedItems: len(suite.filesByPath),
expectedUncachedItems: 1, expectedUncachedItems: 1,
cols: func() []data.BackupCollection { cols: func(t *testing.T) []data.BackupCollection {
c := exchMock.NewCollection( c := exchMock.NewCollection(
suite.testPath1, suite.testPath1,
suite.testPath1, suite.testPath1,
@ -1679,7 +1683,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
c.ColState = data.NotMovedState c.ColState = data.NotMovedState
c.PrevPath = suite.testPath1 c.PrevPath = suite.testPath1
return []data.BackupCollection{c} return []data.BackupCollection{
dataMock.NewVersionedBackupCollection(t, c),
}
}, },
backupIDCheck: require.NotEmpty, backupIDCheck: require.NotEmpty,
restoreCheck: assert.NoError, restoreCheck: assert.NoError,
@ -1717,7 +1723,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
Manifest: man, Manifest: man,
Reasons: []identity.Reasoner{r}, Reasons: []identity.Reasoner{r},
}), }),
test.cols(), test.cols(t),
excluded, excluded,
nil, nil,
true, true,

View File

@ -100,7 +100,7 @@ func (ctrl *Controller) ProduceBackupCollections(
} }
case path.GroupsService: case path.GroupsService:
colls, ssmb, canUsePreviousBackup, err = groups.ProduceBackupCollections( colls, ssmb, err = groups.ProduceBackupCollections(
ctx, ctx,
bpc, bpc,
ctrl.AC, ctrl.AC,
@ -111,6 +111,10 @@ func (ctrl *Controller) ProduceBackupCollections(
return nil, nil, false, err return nil, nil, false, err
} }
// canUsePreviousBackup can always be returned as true for groups, since we
// return a tombstone collection if the metadata read fails
canUsePreviousBackup = true
default: default:
return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx) return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
} }

View File

@ -11,6 +11,9 @@ import (
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
inMock "github.com/alcionai/corso/src/internal/common/idname/mock" inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/m365/service/exchange" "github.com/alcionai/corso/src/internal/m365/service/exchange"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/internal/m365/service/sharepoint" "github.com/alcionai/corso/src/internal/m365/service/sharepoint"
@ -458,9 +461,8 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
for item := range collection.Items(ctx, fault.New(true)) { for item := range collection.Items(ctx, fault.New(true)) {
t.Log("File: " + item.ID()) t.Log("File: " + item.ID())
bs, err := io.ReadAll(item.ToReader()) _, err := io.ReadAll(item.ToReader())
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
t.Log(string(bs))
} }
} }
} }
@ -575,3 +577,123 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
assert.NotZero(t, status.Successes) assert.NotZero(t, status.Successes)
t.Log(status.String()) t.Log(status.String())
} }
func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_InvalidMetadata() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
groupID = tconfig.M365GroupID(t)
ctrl = newController(ctx, t, path.GroupsService)
groupIDs = []string{groupID}
)
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewGroupsBackup(groupIDs)
sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
sel.SetDiscreteOwnerIDName(id, name)
site, err := suite.connector.AC.Groups().GetRootSite(ctx, groupID)
require.NoError(t, err, clues.ToCore(err))
pth, err := path.Build(
suite.tenantID,
groupID,
path.GroupsService,
path.LibrariesCategory,
true,
odConsts.SitesPathDir,
ptr.Val(site.GetId()))
require.NoError(t, err, clues.ToCore(err))
mmc := []data.RestoreCollection{
mock.Collection{
Path: pth,
ItemData: []data.Item{
&mock.Item{
ItemID: "previouspath",
Reader: io.NopCloser(bytes.NewReader([]byte("invalid"))),
},
},
},
}
bpc := inject.BackupProducerConfig{
LastBackupVersion: version.NoBackup,
Options: control.DefaultOptions(),
ProtectedResource: inMock.NewProvider(id, name),
Selector: sel.Selector,
MetadataCollections: mmc,
}
collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
ctx,
bpc,
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup")
// No excludes yet as this isn't an incremental backup.
assert.True(t, excludes.Empty())
// We don't know the exact number of collections this will produce,
// but it should be more than one.
assert.Greater(t, len(collections), 1)
p, err := path.BuildMetadata(
suite.tenantID,
groupID,
path.GroupsService,
path.LibrariesCategory,
false)
require.NoError(t, err, clues.ToCore(err))
p, err = p.Append(false, odConsts.SitesPathDir)
require.NoError(t, err, clues.ToCore(err))
foundSitesMetadata := false
foundRootTombstone := false
sp, err := path.BuildPrefix(
suite.tenantID,
groupID,
path.GroupsService,
path.LibrariesCategory)
require.NoError(t, err, clues.ToCore(err))
sp, err = sp.Append(false, odConsts.SitesPathDir, ptr.Val(site.GetId()))
require.NoError(t, err, clues.ToCore(err))
for _, coll := range collections {
if coll.State() == data.DeletedState {
if coll.PreviousPath() != nil && coll.PreviousPath().String() == sp.String() {
foundRootTombstone = true
}
continue
}
sitesMetadataCollection := coll.FullPath().String() == p.String()
for object := range coll.Items(ctx, fault.New(true)) {
if object.ID() == "previouspath" && sitesMetadataCollection {
foundSitesMetadata = true
}
buf := &bytes.Buffer{}
_, err := buf.ReadFrom(object.ToReader())
assert.NoError(t, err, "reading item", clues.ToCore(err))
}
}
assert.True(t, foundSitesMetadata, "missing sites metadata")
assert.True(t, foundRootTombstone, "missing root tombstone")
status := ctrl.Wait()
assert.NotZero(t, status.Successes)
t.Log(status.String())
}

View File

@ -33,11 +33,7 @@ const (
MaxOneNoteFileSize = 2 * 1024 * 1024 * 1024 MaxOneNoteFileSize = 2 * 1024 * 1024 * 1024
) )
var ( var _ data.BackupCollection = &Collection{}
_ data.BackupCollection = &Collection{}
_ data.Item = &metadata.Item{}
_ data.ItemModTime = &metadata.Item{}
)
// Collection represents a set of OneDrive objects retrieved from M365 // Collection represents a set of OneDrive objects retrieved from M365
type Collection struct { type Collection struct {
@ -588,14 +584,25 @@ func (oc *Collection) streamDriveItem(
return progReader, nil return progReader, nil
}) })
oc.data <- &metadata.Item{ storeItem, err := data.NewUnindexedPrefetchedItem(
ItemID: metaFileName + metaSuffix, metaReader,
Data: metaReader, metaFileName+metaSuffix,
// Metadata file should always use the latest time as // Metadata file should always use the latest time as
// permissions change does not update mod time. // permissions change does not update mod time.
Mod: time.Now(), time.Now())
if err != nil {
errs.AddRecoverable(ctx, clues.Stack(err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation))
return
} }
// We wrap the reader with a lazy reader so that the progress bar is only
// initialized if the file is read. Since we're not actually reading data
// lazily here, just use the eager item implementation.
oc.data <- storeItem
// Item read successfully, add to collection // Item read successfully, add to collection
if isFile { if isFile {
atomic.AddInt64(&stats.itemsRead, 1) atomic.AddInt64(&stats.itemsRead, 1)
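
The comment in this hunk refers to a lazy reader that only initializes the progress bar when the item is actually read. Corso's implementation isn't shown in this diff; the following is an illustrative sketch of the idea (assumes imports io and sync).

// lazyInitReader defers a side effect, such as starting a progress bar,
// until the first Read call.
type lazyInitReader struct {
	rc   io.ReadCloser
	init func()
	once sync.Once
}

func (r *lazyInitReader) Read(p []byte) (int, error) {
	r.once.Do(r.init) // runs only if the item is actually read

	return r.rc.Read(p)
}

func (r *lazyInitReader) Close() error { return r.rc.Close() }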

View File

@ -19,6 +19,7 @@ import (
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata" metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata"
@ -256,7 +257,7 @@ func (suite *CollectionUnitSuite) TestCollection() {
mt := readItem.(data.ItemModTime) mt := readItem.(data.ItemModTime)
assert.Equal(t, now, mt.ModTime()) assert.Equal(t, now, mt.ModTime())
readData, err := io.ReadAll(readItem.ToReader()) rr, err := readers.NewVersionedRestoreReader(readItem.ToReader())
test.expectErr(t, err) test.expectErr(t, err)
if err != nil { if err != nil {
@ -267,13 +268,25 @@ func (suite *CollectionUnitSuite) TestCollection() {
return return
} }
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
readData, err := io.ReadAll(rr)
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, stubItemContent, readData) assert.Equal(t, stubItemContent, readData)
readItemMeta := readItems[1] readItemMeta := readItems[1]
assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID()) assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID())
rr, err = readers.NewVersionedRestoreReader(readItemMeta.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
readMeta := metadata.Metadata{} readMeta := metadata.Metadata{}
err = json.NewDecoder(readItemMeta.ToReader()).Decode(&readMeta) err = json.NewDecoder(rr).Decode(&readMeta)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
metaTD.AssertMetadataEqual(t, stubMeta, readMeta) metaTD.AssertMetadataEqual(t, stubMeta, readMeta)
@ -485,12 +498,18 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime()
for _, i := range readItems { for _, i := range readItems {
if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) { if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) {
content, err := io.ReadAll(i.ToReader()) rr, err := readers.NewVersionedRestoreReader(i.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)
content, err := io.ReadAll(rr)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content)) require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content))
im, ok := i.(data.ItemModTime) im, ok := i.(data.ItemModTime)
require.Equal(t, ok, true, "modtime interface") require.True(t, ok, "modtime interface")
require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time") require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time")
} }
} }
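
These tests now repeat the same wrap-validate-read sequence for every item. A helper along these lines (not part of the diff; it assumes the readers API exactly as used above) would keep the pattern in one place:

func readVersionedItem(t *testing.T, item data.Item) []byte {
	t.Helper()

	rr, err := readers.NewVersionedRestoreReader(item.ToReader())
	require.NoError(t, err, clues.ToCore(err))

	assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
	assert.False(t, rr.Format().DelInFlight)

	payload, err := io.ReadAll(rr)
	require.NoError(t, err, clues.ToCore(err))

	return payload
}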

View File

@ -135,11 +135,6 @@ func deserializeMetadata(
continue continue
} }
if err == nil {
// Successful decode.
continue
}
// This is conservative, but report an error if either any of the items // This is conservative, but report an error if either any of the items
// for any of the deserialized maps have duplicate drive IDs or there's // for any of the deserialized maps have duplicate drive IDs or there's
// some other problem deserializing things. This will cause the entire // some other problem deserializing things. This will cause the entire
@ -147,7 +142,9 @@ func deserializeMetadata(
// these cases. We can make the logic for deciding when to continue vs. // these cases. We can make the logic for deciding when to continue vs.
// when to fail less strict in the future if needed. // when to fail less strict in the future if needed.
if err != nil { if err != nil {
return nil, nil, false, clues.Stack(err).WithClues(ictx) errs.Fail(clues.Stack(err).WithClues(ictx))
return map[string]string{}, map[string]map[string]string{}, false, nil
} }
} }
} }

View File

@ -17,6 +17,7 @@ import (
"github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/common/prefixmatcher"
pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock" pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
@ -984,7 +985,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
{ {
// Bad formats are logged but skip adding entries to the maps and don't // Bad formats are logged but skip adding entries to the maps and don't
// return an error. // return an error.
name: "BadFormat", name: "BadFormat",
expectedDeltas: map[string]string{},
expectedPaths: map[string]map[string]string{},
cols: []func() []graph.MetadataCollectionEntry{ cols: []func() []graph.MetadataCollectionEntry{
func() []graph.MetadataCollectionEntry { func() []graph.MetadataCollectionEntry {
return []graph.MetadataCollectionEntry{ return []graph.MetadataCollectionEntry{
@ -995,7 +998,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
}, },
}, },
canUsePreviousBackup: false, canUsePreviousBackup: false,
errCheck: assert.Error, errCheck: assert.NoError,
}, },
{ {
// Unexpected files are logged and skipped. They don't cause an error to // Unexpected files are logged and skipped. They don't cause an error to
@ -1060,10 +1063,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
} }
}, },
}, },
expectedDeltas: nil, expectedDeltas: map[string]string{},
expectedPaths: nil, expectedPaths: map[string]map[string]string{},
canUsePreviousBackup: false, canUsePreviousBackup: false,
errCheck: assert.Error, errCheck: assert.NoError,
}, },
{ {
name: "DriveAlreadyFound_Deltas", name: "DriveAlreadyFound_Deltas",
@ -1090,10 +1093,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
} }
}, },
}, },
expectedDeltas: nil, expectedDeltas: map[string]string{},
expectedPaths: nil, expectedPaths: map[string]map[string]string{},
canUsePreviousBackup: false, canUsePreviousBackup: false,
errCheck: assert.Error, errCheck: assert.NoError,
}, },
} }
@ -1121,7 +1124,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
func(*support.ControllerOperationStatus) {}) func(*support.ControllerOperationStatus) {})
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
cols = append(cols, data.NoFetchRestoreCollection{Collection: mc}) cols = append(cols, dataMock.NewUnversionedRestoreCollection(
t,
data.NoFetchRestoreCollection{Collection: mc}))
} }
deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols) deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols)
@ -2294,7 +2299,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
func(*support.ControllerOperationStatus) {}) func(*support.ControllerOperationStatus) {})
assert.NoError(t, err, "creating metadata collection", clues.ToCore(err)) assert.NoError(t, err, "creating metadata collection", clues.ToCore(err))
prevMetadata := []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: mc}} prevMetadata := []data.RestoreCollection{
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: mc}),
}
errs := fault.New(true) errs := fault.New(true)
delList := prefixmatcher.NewStringSetBuilder() delList := prefixmatcher.NewStringSetBuilder()
@ -2321,7 +2328,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
deltas, paths, _, err := deserializeMetadata( deltas, paths, _, err := deserializeMetadata(
ctx, ctx,
[]data.RestoreCollection{ []data.RestoreCollection{
data.NoFetchRestoreCollection{Collection: baseCol}, dataMock.NewUnversionedRestoreCollection(
t,
data.NoFetchRestoreCollection{Collection: baseCol}),
}) })
if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) { if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
continue continue

View File

@ -1,7 +1,6 @@
package metadata package metadata
import ( import (
"io"
"time" "time"
) )
@ -41,17 +40,3 @@ type Metadata struct {
Permissions []Permission `json:"permissions,omitempty"` Permissions []Permission `json:"permissions,omitempty"`
LinkShares []LinkShare `json:"linkShares,omitempty"` LinkShares []LinkShare `json:"linkShares,omitempty"`
} }
type Item struct {
ItemID string
Data io.ReadCloser
Mod time.Time
}
// Deleted implements an interface function. However, OneDrive items are marked
// as deleted by adding them to the exclude list so this can always return
// false.
func (i *Item) Deleted() bool { return false }
func (i *Item) ID() string { return i.ItemID }
func (i *Item) ToReader() io.ReadCloser { return i.Data }
func (i *Item) ModTime() time.Time { return i.Mod }

View File

@ -15,7 +15,9 @@ import (
inMock "github.com/alcionai/corso/src/internal/common/idname/mock" inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/internal/operations/inject"
@ -322,7 +324,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{ cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
data.NoFetchRestoreCollection{Collection: coll}, dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: coll}),
}) })
test.expectError(t, err, clues.ToCore(err)) test.expectError(t, err, clues.ToCore(err))
@ -591,7 +593,7 @@ func (suite *BackupIntgSuite) TestDelta() {
require.NotNil(t, metadata, "collections contains a metadata collection") require.NotNil(t, metadata, "collections contains a metadata collection")
cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{ cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
data.NoFetchRestoreCollection{Collection: metadata}, dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: metadata}),
}) })
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup") assert.True(t, canUsePreviousBackup, "can use previous backup")
@ -666,7 +668,12 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() {
for stream := range streamChannel { for stream := range streamChannel {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
read, err := buf.ReadFrom(stream.ToReader()) rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
read, err := buf.ReadFrom(rr)
assert.NoError(t, err, clues.ToCore(err)) assert.NoError(t, err, clues.ToCore(err))
assert.NotZero(t, read) assert.NotZero(t, read)
@ -744,7 +751,13 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() {
for stream := range edc.Items(ctx, fault.New(true)) { for stream := range edc.Items(ctx, fault.New(true)) {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
read, err := buf.ReadFrom(stream.ToReader())
rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
read, err := buf.ReadFrom(rr)
assert.NoError(t, err, clues.ToCore(err)) assert.NoError(t, err, clues.ToCore(err))
assert.NotZero(t, read) assert.NotZero(t, read)
@ -878,7 +891,12 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() {
for item := range edc.Items(ctx, fault.New(true)) { for item := range edc.Items(ctx, fault.New(true)) {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
read, err := buf.ReadFrom(item.ToReader()) rr, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
read, err := buf.ReadFrom(rr)
assert.NoError(t, err, clues.ToCore(err)) assert.NoError(t, err, clues.ToCore(err))
assert.NotZero(t, read) assert.NotZero(t, read)
@ -1198,7 +1216,9 @@ func checkMetadata(
) { ) {
catPaths, _, err := ParseMetadataCollections( catPaths, _, err := ParseMetadataCollections(
ctx, ctx,
[]data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}}) []data.RestoreCollection{
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: c}),
})
if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) { if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) {
return return
} }

View File

@ -278,10 +278,21 @@ func (col *prefetchCollection) streamItems(
return return
} }
stream <- data.NewPrefetchedItem( item, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(itemData)), io.NopCloser(bytes.NewReader(itemData)),
id, id,
details.ItemInfo{Exchange: info}) details.ItemInfo{Exchange: info})
if err != nil {
el.AddRecoverable(
ctx,
clues.Stack(err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation))
return
}
stream <- item
atomic.AddInt64(&success, 1) atomic.AddInt64(&success, 1)
atomic.AddInt64(&totalBytes, info.Size) atomic.AddInt64(&totalBytes, info.Size)
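
data.NewPrefetchedItem can now fail, presumably because it constructs the versioned serialization stream up front, so the producer records the failure on the fault bus instead of aborting the whole stream. A sketch of that pattern is below; the concrete type of el isn't shown in this diff, so the *fault.Bus signature is an assumption, and the label's effect (skipping backup-model creation for the run) is inferred from its name.

func addItemFailure(ctx context.Context, el *fault.Bus, err error) {
	// Recoverable: other items keep streaming, but the labeled error tells
	// the backup layer not to persist a backup for this run.
	el.AddRecoverable(
		ctx,
		clues.Stack(err).
			WithClues(ctx).
			Label(fault.LabelForceNoBackupCreation))
}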

View File

@ -17,6 +17,7 @@ import (
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
"github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/collection/exchange/mock" "github.com/alcionai/corso/src/internal/m365/collection/exchange/mock"
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
@ -55,13 +56,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
ed := data.NewPrefetchedItem( ed, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(test.readData)), io.NopCloser(bytes.NewReader(test.readData)),
"itemID", "itemID",
details.ItemInfo{}) details.ItemInfo{})
require.NoError(t, err, clues.ToCore(err))
r, err := readers.NewVersionedRestoreReader(ed.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
_, err := buf.ReadFrom(ed.ToReader()) _, err = buf.ReadFrom(r)
assert.NoError(t, err, "reading data: %v", clues.ToCore(err)) assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
assert.Equal(t, test.readData, buf.Bytes(), "read data") assert.Equal(t, test.readData, buf.Bytes(), "read data")
assert.Equal(t, "itemID", ed.ID(), "item ID") assert.Equal(t, "itemID", ed.ID(), "item ID")
@ -493,11 +501,11 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() {
time.Now(), time.Now(),
fault.New(true)) fault.New(true))
_, err := li.(data.ItemInfo).Info() _, err := li.Info()
assert.Error(suite.T(), err, "Info without reading data should error") assert.Error(suite.T(), err, "Info without reading data should error")
} }
func (suite *CollectionUnitSuite) TestLazyItem() { func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() {
var ( var (
parentPath = "inbox/private/silly cats" parentPath = "inbox/private/silly cats"
now = time.Now() now = time.Now()
@ -505,44 +513,19 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
table := []struct { table := []struct {
name string name string
modTime time.Time
getErr error getErr error
serializeErr error serializeErr error
expectModTime time.Time
expectReadErrType error expectReadErrType error
dataCheck assert.ValueAssertionFunc
expectInfoErr bool
expectInfoErrType error
}{ }{
{
name: "ReturnsEmptyReaderOnDeletedInFlight",
modTime: now,
getErr: graph.ErrDeletedInFlight,
dataCheck: assert.Empty,
expectInfoErr: true,
expectInfoErrType: data.ErrNotFound,
},
{
name: "ReturnsValidReaderAndInfo",
modTime: now,
dataCheck: assert.NotEmpty,
expectModTime: now,
},
{ {
name: "ReturnsErrorOnGenericGetError", name: "ReturnsErrorOnGenericGetError",
modTime: now,
getErr: assert.AnError, getErr: assert.AnError,
expectReadErrType: assert.AnError, expectReadErrType: assert.AnError,
dataCheck: assert.Empty,
expectInfoErr: true,
}, },
{ {
name: "ReturnsErrorOnGenericSerializeError", name: "ReturnsErrorOnGenericSerializeError",
modTime: now,
serializeErr: assert.AnError, serializeErr: assert.AnError,
expectReadErrType: assert.AnError, expectReadErrType: assert.AnError,
dataCheck: assert.Empty,
expectInfoErr: true,
}, },
} }
@@ -575,47 +558,128 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
 				userID:       "userID",
 				itemID:       "itemID",
 				getter:       getter,
-				modTime:      test.modTime,
+				modTime:      now,
 				immutableIDs: false,
 				parentPath:   parentPath,
 			},
 			"itemID",
-			test.modTime,
+			now,
 			fault.New(true))

 		assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
-		assert.Equal(
-			t,
-			test.modTime,
-			li.(data.ItemModTime).ModTime(),
-			"item mod time")
+		assert.Equal(t, now, li.ModTime(), "item mod time")

-		readData, err := io.ReadAll(li.ToReader())
-		if test.expectReadErrType == nil {
-			assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
-		} else {
-			assert.ErrorIs(t, err, test.expectReadErrType, "read error")
-		}
-
-		test.dataCheck(t, readData, "read item data")
-
-		info, err := li.(data.ItemInfo).Info()
-
-		// Didn't expect an error getting info, it should be valid.
-		if !test.expectInfoErr {
-			assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
-			assert.Equal(t, parentPath, info.Exchange.ParentPath)
-			assert.Equal(t, test.expectModTime, info.Modified())
-
-			return
-		}
+		_, err := readers.NewVersionedRestoreReader(li.ToReader())
+		assert.ErrorIs(t, err, test.expectReadErrType)

 		// Should get some form of error when trying to get info.
+		_, err = li.Info()
 		assert.Error(t, err, "Info()")
-
-		if test.expectInfoErrType != nil {
-			assert.ErrorIs(t, err, test.expectInfoErrType, "Info() error")
-		}
 		})
 	}
 }
func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlight() {
var (
t = suite.T()
parentPath = "inbox/private/silly cats"
now = time.Now()
)
ctx, flush := tester.NewContext(t)
defer flush()
getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight}
li := data.NewLazyItem(
ctx,
&lazyItemGetter{
userID: "userID",
itemID: "itemID",
getter: getter,
modTime: now,
immutableIDs: false,
parentPath: parentPath,
},
"itemID",
now,
fault.New(true))
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
assert.Equal(
t,
now,
li.ModTime(),
"item mod time")
r, err := readers.NewVersionedRestoreReader(li.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.True(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
assert.Empty(t, readData, "read item data")
_, err = li.Info()
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
}
func (suite *CollectionUnitSuite) TestLazyItem() {
var (
t = suite.T()
parentPath = "inbox/private/silly cats"
now = time.Now()
)
ctx, flush := tester.NewContext(t)
defer flush()
// Exact data type doesn't really matter.
testData := models.NewMessage()
testData.SetSubject(ptr.To("hello world"))
getter := &mock.ItemGetSerialize{GetData: testData}
li := data.NewLazyItem(
ctx,
&lazyItemGetter{
userID: "userID",
itemID: "itemID",
getter: getter,
modTime: now,
immutableIDs: false,
parentPath: parentPath,
},
"itemID",
now,
fault.New(true))
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
assert.Equal(
t,
now,
li.ModTime(),
"item mod time")
r, err := readers.NewVersionedRestoreReader(li.ToReader())
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
assert.False(t, r.Format().DelInFlight)
readData, err := io.ReadAll(r)
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
assert.NotEmpty(t, readData, "read item data")
info, err := li.Info()
assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
assert.Equal(t, parentPath, info.Exchange.ParentPath)
assert.Equal(t, now, info.Modified())
}
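These tests treat every stored item as carrying a small serialization-format header that NewVersionedRestoreReader consumes before handing back the payload. A minimal, self-contained sketch of that pattern, using hypothetical local types rather than Corso's actual readers package:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// SerializationFormat mirrors the Format() metadata the tests check:
// a version number plus a deleted-in-flight marker.
type SerializationFormat struct {
	Version     uint32
	DelInFlight bool
}

// wrapWithFormat prepends a fixed-size header to the item payload.
func wrapWithFormat(payload []byte, f SerializationFormat) io.Reader {
	head := make([]byte, 5)
	binary.BigEndian.PutUint32(head[:4], f.Version)

	if f.DelInFlight {
		head[4] = 1
	}

	return io.MultiReader(bytes.NewReader(head), bytes.NewReader(payload))
}

// newVersionedReader strips the header and returns it alongside the
// remaining payload reader, analogous in spirit to NewVersionedRestoreReader.
func newVersionedReader(r io.Reader) (SerializationFormat, io.Reader, error) {
	head := make([]byte, 5)
	if _, err := io.ReadFull(r, head); err != nil {
		return SerializationFormat{}, nil, fmt.Errorf("reading format header: %w", err)
	}

	return SerializationFormat{
		Version:     binary.BigEndian.Uint32(head[:4]),
		DelInFlight: head[4] == 1,
	}, r, nil
}

func main() {
	wrapped := wrapWithFormat([]byte("hello world"), SerializationFormat{Version: 1})

	f, body, err := newVersionedReader(wrapped)
	if err != nil {
		panic(err)
	}

	data, _ := io.ReadAll(body)
	fmt.Printf("version=%d delInFlight=%v body=%q\n", f.Version, f.DelInFlight, data)
}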

View File

@@ -67,6 +67,15 @@ func (bh channelsBackupHandler) canonicalPath(
 		false)
 }

+func (bh channelsBackupHandler) PathPrefix(tenantID string) (path.Path, error) {
+	return path.Build(
+		tenantID,
+		bh.protectedResource,
+		path.GroupsService,
+		path.ChannelMessagesCategory,
+		false)
+}
+
 func (bh channelsBackupHandler) GetChannelMessage(
 	ctx context.Context,
 	teamID, channelID, itemID string,
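The new PathPrefix gives callers a stable storage prefix for a group's channel messages. A rough, runnable illustration of the shape such a prefix takes; buildPrefix stands in for path.Build, and the segment order here is an assumption for illustration only:

package main

import (
	"fmt"
	"strings"
)

// buildPrefix is a stand-in for path.Build: it joins the tenant, service,
// protected resource, and category into a canonical storage prefix.
func buildPrefix(tenantID, service, resource, category string) string {
	return strings.Join([]string{tenantID, service, resource, category}, "/")
}

func main() {
	// Roughly what channelsBackupHandler.PathPrefix produces for a group.
	fmt.Println(buildPrefix("tenantID", "groups", "groupID", "channelMessages"))
}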

View File

@@ -150,27 +150,47 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 			parentFolderID,
 			id)
 		if err != nil {
-			el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
+			el.AddRecoverable(
+				ctx,
+				clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
 			return
 		}

 		if err := writer.WriteObjectValue("", item); err != nil {
-			el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
+			el.AddRecoverable(
+				ctx,
+				clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
 			return
 		}

 		itemData, err := writer.GetSerializedContent()
 		if err != nil {
-			el.AddRecoverable(ctx, clues.Wrap(err, "serializing channel message"))
+			el.AddRecoverable(
+				ctx,
+				clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation))
 			return
 		}

 		info.ParentPath = col.LocationPath().String()

-		col.stream <- data.NewPrefetchedItem(
+		storeItem, err := data.NewPrefetchedItem(
 			io.NopCloser(bytes.NewReader(itemData)),
 			id,
 			details.ItemInfo{Groups: info})
+		if err != nil {
+			el.AddRecoverable(
+				ctx,
+				clues.Stack(err).
+					WithClues(ctx).
+					Label(fault.LabelForceNoBackupCreation))
+			return
+		}
+
+		col.stream <- storeItem

 		atomic.AddInt64(&streamedItems, 1)
 		atomic.AddInt64(&totalBytes, info.Size)
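Serialization failures here stay recoverable for streaming purposes, but are labeled with fault.LabelForceNoBackupCreation so the backup consumer can decline to finalize a backup model. A self-contained sketch of label-carrying errors; the helper names are hypothetical, not the real clues/fault API:

package main

import (
	"errors"
	"fmt"
)

// labeledError is a stand-in for a clues/fault-style error that carries
// string labels consumers can inspect before committing a backup.
type labeledError struct {
	err    error
	labels map[string]struct{}
}

func (e *labeledError) Error() string { return e.err.Error() }
func (e *labeledError) Unwrap() error { return e.err }

func label(err error, l string) *labeledError {
	return &labeledError{err: err, labels: map[string]struct{}{l: {}}}
}

func hasLabel(err error, l string) bool {
	var le *labeledError
	if errors.As(err, &le) {
		_, ok := le.labels[l]
		return ok
	}

	return false
}

const labelForceNoBackupCreation = "force_no_backup_creation"

func main() {
	err := label(errors.New("writing channel message to serializer"), labelForceNoBackupCreation)

	// A backup consumer can refuse to finalize a model when any recoverable
	// error carries this label.
	fmt.Println(hasLabel(err, labelForceNoBackupCreation)) // true
}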

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

+	"github.com/alcionai/corso/src/internal/common/readers"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/collection/groups/mock"
 	"github.com/alcionai/corso/src/internal/m365/support"
@@ -48,13 +49,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
 		suite.Run(test.name, func() {
 			t := suite.T()

-			ed := data.NewPrefetchedItem(
+			ed, err := data.NewPrefetchedItem(
 				io.NopCloser(bytes.NewReader(test.readData)),
 				"itemID",
 				details.ItemInfo{})
+			require.NoError(t, err, clues.ToCore(err))
+
+			r, err := readers.NewVersionedRestoreReader(ed.ToReader())
+			require.NoError(t, err, clues.ToCore(err))
+
+			assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
+			assert.False(t, r.Format().DelInFlight)

 			buf := &bytes.Buffer{}
-			_, err := buf.ReadFrom(ed.ToReader())
+			_, err = buf.ReadFrom(r)
 			assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
 			assert.Equal(t, test.readData, buf.Bytes(), "read data")
 			assert.Equal(t, "itemID", ed.ID(), "item ID")

View File

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"io"
-	"time"

 	"github.com/alcionai/clues"
 	"github.com/microsoft/kiota-abstractions-go/serialization"
@@ -40,12 +39,7 @@ const (
 	Pages DataCategory = 2
 )

-var (
-	_ data.BackupCollection = &Collection{}
-	_ data.Item             = &Item{}
-	_ data.ItemInfo         = &Item{}
-	_ data.ItemModTime      = &Item{}
-)
+var _ data.BackupCollection = &Collection{}

 // Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
 // by the oneDrive.Collection as the calls are identical for populating the Collection
@@ -120,43 +114,6 @@ func (sc *Collection) Items(
 	return sc.data
 }

-type Item struct {
-	id   string
-	data io.ReadCloser
-	info *details.SharePointInfo
-
-	modTime time.Time
-
-	// true if the item was marked by graph as deleted.
-	deleted bool
-}
-
-func NewItem(name string, d io.ReadCloser) *Item {
-	return &Item{
-		id:   name,
-		data: d,
-	}
-}
-
-func (sd *Item) ID() string {
-	return sd.id
-}
-
-func (sd *Item) ToReader() io.ReadCloser {
-	return sd.data
-}
-
-func (sd Item) Deleted() bool {
-	return sd.deleted
-}
-
-func (sd *Item) Info() (details.ItemInfo, error) {
-	return details.ItemInfo{SharePoint: sd.info}, nil
-}
-
-func (sd *Item) ModTime() time.Time {
-	return sd.modTime
-}
-
 func (sc *Collection) finishPopulation(
 	ctx context.Context,
 	metrics support.CollectionMetrics,
@@ -251,21 +208,20 @@ func (sc *Collection) retrieveLists(
 		size := int64(len(byteArray))

 		if size > 0 {
-			t := time.Now()
-
-			if t1 := lst.GetLastModifiedDateTime(); t1 != nil {
-				t = *t1
-			}
-
 			metrics.Bytes += size
 			metrics.Successes++
-			sc.data <- &Item{
-				id:      ptr.Val(lst.GetId()),
-				data:    io.NopCloser(bytes.NewReader(byteArray)),
-				info:    ListToSPInfo(lst, size),
-				modTime: t,
+
+			item, err := data.NewPrefetchedItem(
+				io.NopCloser(bytes.NewReader(byteArray)),
+				ptr.Val(lst.GetId()),
+				details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
+			if err != nil {
+				el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
+				continue
 			}
+
+			sc.data <- item
 			progress <- struct{}{}
 		}
 	}
@@ -322,13 +278,17 @@ func (sc *Collection) retrievePages(
 		if size > 0 {
 			metrics.Bytes += size
 			metrics.Successes++
-			sc.data <- &Item{
-				id:      ptr.Val(pg.GetId()),
-				data:    io.NopCloser(bytes.NewReader(byteArray)),
-				info:    pageToSPInfo(pg, root, size),
-				modTime: ptr.OrNow(pg.GetLastModifiedDateTime()),
+
+			item, err := data.NewPrefetchedItem(
+				io.NopCloser(bytes.NewReader(byteArray)),
+				ptr.Val(pg.GetId()),
+				details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})
+			if err != nil {
+				el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
+				continue
 			}
+
+			sc.data <- item
 			progress <- struct{}{}
 		}
 	}
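Both retrieval paths now delegate to data.NewPrefetchedItem instead of the bespoke Item struct removed above. A compact sketch of what such a constructor provides, namely fail-fast validation plus the ID and reader accessors downstream consumers need; these are hypothetical local types, not Corso's data package:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// prefetchedItem sketches the shape of data.NewPrefetchedItem's result: an
// already-serialized payload tagged with an ID and backup-details info.
type prefetchedItem struct {
	id     string
	reader io.ReadCloser
	info   any // stands in for details.ItemInfo
}

func (p prefetchedItem) ID() string              { return p.id }
func (p prefetchedItem) ToReader() io.ReadCloser { return p.reader }
func (p prefetchedItem) Deleted() bool           { return false }

// newPrefetchedItem validates inputs up front so collections can surface a
// recoverable error instead of streaming a malformed item.
func newPrefetchedItem(r io.ReadCloser, id string, info any) (prefetchedItem, error) {
	if r == nil || len(id) == 0 {
		return prefetchedItem{}, errors.New("missing reader or item ID")
	}

	return prefetchedItem{id: id, reader: r, info: info}, nil
}

func main() {
	item, err := newPrefetchedItem(
		io.NopCloser(bytes.NewReader([]byte(`{"list":"data"}`))),
		"listID",
		nil)
	if err != nil {
		panic(err)
	}

	body, _ := io.ReadAll(item.ToReader())
	fmt.Println(item.ID(), string(body))
}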

View File

@@ -19,6 +19,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/control/testdata"
 	"github.com/alcionai/corso/src/pkg/fault"
@@ -58,21 +59,6 @@ func TestSharePointCollectionSuite(t *testing.T) {
 	})
 }

-func (suite *SharePointCollectionSuite) TestCollection_Item_Read() {
-	t := suite.T()
-
-	m := []byte("test message")
-	name := "aFile"
-
-	sc := &Item{
-		id:   name,
-		data: io.NopCloser(bytes.NewReader(m)),
-	}
-
-	readData, err := io.ReadAll(sc.ToReader())
-	require.NoError(t, err, clues.ToCore(err))
-	assert.Equal(t, name, sc.id)
-	assert.Equal(t, readData, m)
-}
-
 // TestListCollection tests basic functionality to create
 // SharePoint collection and to use the data stream channel.
 func (suite *SharePointCollectionSuite) TestCollection_Items() {
@@ -88,7 +74,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
 		name, itemName string
 		scope          selectors.SharePointScope
 		getDir         func(t *testing.T) path.Path
-		getItem        func(t *testing.T, itemName string) *Item
+		getItem        func(t *testing.T, itemName string) data.Item
 	}{
 		{
 			name: "List",
@@ -106,7 +92,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
 				return dir
 			},
-			getItem: func(t *testing.T, name string) *Item {
+			getItem: func(t *testing.T, name string) data.Item {
 				ow := kioser.NewJsonSerializationWriter()
 				listing := spMock.ListDefault(name)
 				listing.SetDisplayName(&name)
@@ -117,11 +103,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
 				byteArray, err := ow.GetSerializedContent()
 				require.NoError(t, err, clues.ToCore(err))

-				data := &Item{
-					id:   name,
-					data: io.NopCloser(bytes.NewReader(byteArray)),
-					info: ListToSPInfo(listing, int64(len(byteArray))),
-				}
+				data, err := data.NewPrefetchedItem(
+					io.NopCloser(bytes.NewReader(byteArray)),
+					name,
+					details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
+				require.NoError(t, err, clues.ToCore(err))

 				return data
 			},
@@ -142,16 +128,16 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
 				return dir
 			},
-			getItem: func(t *testing.T, itemName string) *Item {
+			getItem: func(t *testing.T, itemName string) data.Item {
 				byteArray := spMock.Page(itemName)
 				page, err := betaAPI.CreatePageFromBytes(byteArray)
 				require.NoError(t, err, clues.ToCore(err))

-				data := &Item{
-					id:   itemName,
-					data: io.NopCloser(bytes.NewReader(byteArray)),
-					info: betaAPI.PageInfo(page, int64(len(byteArray))),
-				}
+				data, err := data.NewPrefetchedItem(
+					io.NopCloser(bytes.NewReader(byteArray)),
+					itemName,
+					details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))})
+				require.NoError(t, err, clues.ToCore(err))

 				return data
 			},
@@ -210,11 +196,11 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
 	byteArray, err := service.Serialize(listing)
 	require.NoError(t, err, clues.ToCore(err))

-	listData := &Item{
-		id:   testName,
-		data: io.NopCloser(bytes.NewReader(byteArray)),
-		info: ListToSPInfo(listing, int64(len(byteArray))),
-	}
+	listData, err := data.NewPrefetchedItem(
+		io.NopCloser(bytes.NewReader(byteArray)),
+		testName,
+		details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
+	require.NoError(t, err, clues.ToCore(err))

 	destName := testdata.DefaultRestoreConfig("").Location

View File

@@ -79,20 +79,29 @@ func NewController(
 		return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
 	}

-	rc := resource.UnknownResource
+	var rCli *resourceClient

-	switch pst {
-	case path.ExchangeService, path.OneDriveService:
-		rc = resource.Users
-	case path.GroupsService:
-		rc = resource.Groups
-	case path.SharePointService:
-		rc = resource.Sites
-	}
-
-	rCli, err := getResourceClient(rc, ac)
-	if err != nil {
-		return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
+	// no failure for unknown service.
+	// In that case we create a controller that doesn't attempt to look up any resource
+	// data. This case helps avoid unnecessary service calls when the end user is running
+	// repo init and connect commands via the CLI. All other callers should be expected
+	// to pass in a known service, or else expect downstream failures.
+	if pst != path.UnknownService {
+		rc := resource.UnknownResource
+
+		switch pst {
+		case path.ExchangeService, path.OneDriveService:
+			rc = resource.Users
+		case path.GroupsService:
+			rc = resource.Groups
+		case path.SharePointService:
+			rc = resource.Sites
+		}
+
+		rCli, err = getResourceClient(rc, ac)
+		if err != nil {
+			return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
+		}
 	}

 	ctrl := Controller{
@@ -110,6 +119,10 @@ func NewController(
 	return &ctrl, nil
 }

+func (ctrl *Controller) VerifyAccess(ctx context.Context) error {
+	return ctrl.AC.Access().GetToken(ctx)
+}
+
 // ---------------------------------------------------------------------------
 // Processing Status
 // ---------------------------------------------------------------------------
@@ -195,7 +208,7 @@ func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, er
 	case resource.Groups:
 		return &resourceClient{enum: rc, getter: ac.Groups()}, nil
 	default:
-		return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc)
+		return nil, clues.New("unrecognized owner resource type").With("resource_enum", rc)
 	}
 }
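The guard above means repo init and connect, which pass path.UnknownService, no longer trigger a Graph resource lookup at controller construction. A runnable distillation of that control flow with stand-in types:

package main

import "fmt"

type serviceType int

const (
	unknownService serviceType = iota
	exchangeService
	oneDriveService
	sharePointService
	groupsService
)

type resourceClient struct{ kind string }

// newController mirrors the new guard: only resolve a resource client when
// the caller names a concrete service.
func newController(pst serviceType) (*resourceClient, error) {
	if pst == unknownService {
		// repo init/connect paths land here; no lookup is attempted.
		return nil, nil
	}

	switch pst {
	case exchangeService, oneDriveService:
		return &resourceClient{kind: "users"}, nil
	case groupsService:
		return &resourceClient{kind: "groups"}, nil
	case sharePointService:
		return &resourceClient{kind: "sites"}, nil
	}

	return nil, fmt.Errorf("unrecognized owner resource type: %d", pst)
}

func main() {
	rc, err := newController(unknownService)
	fmt.Println(rc, err) // <nil> <nil>: controller built without resource lookups
}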

View File

@@ -861,7 +861,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
 			},
 		},
 		{
-			name:    "MultipleContactsSingleFolder",
+			name:    "MultipleContactsInRestoreFolder",
 			service: path.ExchangeService,
 			collections: []stub.ColInfo{
 				{
@@ -887,49 +887,77 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
 				},
 			},
 		},
-		{
-			name:    "MultipleContactsMultipleFolders",
-			service: path.ExchangeService,
-			collections: []stub.ColInfo{
-				{
-					PathElements: []string{"Work"},
-					Category:     path.ContactsCategory,
-					Items: []stub.ItemInfo{
-						{
-							Name:      "someencodeditemID",
-							Data:      exchMock.ContactBytes("Ghimley"),
-							LookupKey: "Ghimley",
-						},
-						{
-							Name:      "someencodeditemID2",
-							Data:      exchMock.ContactBytes("Irgot"),
-							LookupKey: "Irgot",
-						},
-						{
-							Name:      "someencodeditemID3",
-							Data:      exchMock.ContactBytes("Jannes"),
-							LookupKey: "Jannes",
-						},
-					},
-				},
-				{
-					PathElements: []string{"Personal"},
-					Category:     path.ContactsCategory,
-					Items: []stub.ItemInfo{
-						{
-							Name:      "someencodeditemID4",
-							Data:      exchMock.ContactBytes("Argon"),
-							LookupKey: "Argon",
-						},
-						{
-							Name:      "someencodeditemID5",
-							Data:      exchMock.ContactBytes("Bernard"),
-							LookupKey: "Bernard",
-						},
-					},
-				},
-			},
-		},
+		// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
+		//{
+		//	name:    "MultipleContactsSingleFolder",
+		//	service: path.ExchangeService,
+		//	collections: []stub.ColInfo{
+		//		{
+		//			PathElements: []string{"Contacts"},
+		//			Category:     path.ContactsCategory,
+		//			Items: []stub.ItemInfo{
+		//				{
+		//					Name:      "someencodeditemID",
+		//					Data:      exchMock.ContactBytes("Ghimley"),
+		//					LookupKey: "Ghimley",
+		//				},
+		//				{
+		//					Name:      "someencodeditemID2",
+		//					Data:      exchMock.ContactBytes("Irgot"),
+		//					LookupKey: "Irgot",
+		//				},
+		//				{
+		//					Name:      "someencodeditemID3",
+		//					Data:      exchMock.ContactBytes("Jannes"),
+		//					LookupKey: "Jannes",
+		//				},
+		//			},
+		//		},
+		//	},
+		//},
+		//{
+		//	name:    "MultipleContactsMultipleFolders",
+		//	service: path.ExchangeService,
+		//	collections: []stub.ColInfo{
+		//		{
+		//			PathElements: []string{"Work"},
+		//			Category:     path.ContactsCategory,
+		//			Items: []stub.ItemInfo{
+		//				{
+		//					Name:      "someencodeditemID",
+		//					Data:      exchMock.ContactBytes("Ghimley"),
+		//					LookupKey: "Ghimley",
+		//				},
+		//				{
+		//					Name:      "someencodeditemID2",
+		//					Data:      exchMock.ContactBytes("Irgot"),
+		//					LookupKey: "Irgot",
+		//				},
+		//				{
+		//					Name:      "someencodeditemID3",
+		//					Data:      exchMock.ContactBytes("Jannes"),
+		//					LookupKey: "Jannes",
+		//				},
+		//			},
+		//		},
+		//		{
+		//			PathElements: []string{"Personal"},
+		//			Category:     path.ContactsCategory,
+		//			Items: []stub.ItemInfo{
+		//				{
+		//					Name:      "someencodeditemID4",
+		//					Data:      exchMock.ContactBytes("Argon"),
+		//					LookupKey: "Argon",
+		//				},
+		//				{
+		//					Name:      "someencodeditemID5",
+		//					Data:      exchMock.ContactBytes("Bernard"),
+		//					LookupKey: "Bernard",
+		//				},
+		//			},
+		//		},
+		//	},
+		//},
 		// {
 		// 	name:    "MultipleEventsSingleCalendar",
 		// 	service: path.ExchangeService,
@@ -1017,34 +1045,35 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {

 func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
 	table := []restoreBackupInfo{
-		{
-			name:    "Contacts",
-			service: path.ExchangeService,
-			collections: []stub.ColInfo{
-				{
-					PathElements: []string{"Work"},
-					Category:     path.ContactsCategory,
-					Items: []stub.ItemInfo{
-						{
-							Name:      "someencodeditemID",
-							Data:      exchMock.ContactBytes("Ghimley"),
-							LookupKey: "Ghimley",
-						},
-					},
-				},
-				{
-					PathElements: []string{"Personal"},
-					Category:     path.ContactsCategory,
-					Items: []stub.ItemInfo{
-						{
-							Name:      "someencodeditemID2",
-							Data:      exchMock.ContactBytes("Irgot"),
-							LookupKey: "Irgot",
-						},
-					},
-				},
-			},
-		},
+		// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
+		//{
+		//	name:    "Contacts",
+		//	service: path.ExchangeService,
+		//	collections: []stub.ColInfo{
+		//		{
+		//			PathElements: []string{"Work"},
+		//			Category:     path.ContactsCategory,
+		//			Items: []stub.ItemInfo{
+		//				{
+		//					Name:      "someencodeditemID",
+		//					Data:      exchMock.ContactBytes("Ghimley"),
+		//					LookupKey: "Ghimley",
+		//				},
+		//			},
+		//		},
+		//		{
+		//			PathElements: []string{"Personal"},
+		//			Category:     path.ContactsCategory,
+		//			Items: []stub.ItemInfo{
+		//				{
+		//					Name:      "someencodeditemID2",
+		//					Data:      exchMock.ContactBytes("Irgot"),
+		//					LookupKey: "Irgot",
+		//				},
+		//			},
+		//		},
+		//	},
+		//},
 		// {
 		// 	name:    "Events",
 		// 	service: path.ExchangeService,

View File

@@ -70,6 +70,7 @@ const (
 	NoSPLicense errorMessage = "Tenant does not have a SPO license"
 	parameterDeltaTokenNotSupported errorMessage = "Parameter 'DeltaToken' not supported for this request"
 	usersCannotBeResolved errorMessage = "One or more users could not be resolved"
+	requestedSiteCouldNotBeFound errorMessage = "Requested site could not be found"
 )

 const (
@@ -259,6 +260,10 @@ func IsErrUsersCannotBeResolved(err error) bool {
 	return hasErrorCode(err, noResolvedUsers) || hasErrorMessage(err, usersCannotBeResolved)
 }

+func IsErrSiteNotFound(err error) bool {
+	return hasErrorMessage(err, requestedSiteCouldNotBeFound)
+}
+
 // ---------------------------------------------------------------------------
 // error parsers
 // ---------------------------------------------------------------------------
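IsErrSiteNotFound keys off the response message rather than an error code, and the tests in the next file exercise its case-insensitivity explicitly. A small sketch of a matcher of the kind hasErrorMessage implies; the helper body here is an assumption, not Corso's implementation:

package main

import (
	"errors"
	"fmt"
	"strings"
)

const requestedSiteCouldNotBeFound = "Requested site could not be found"

// hasErrorMessage does a case-insensitive substring check against the
// error's message.
func hasErrorMessage(err error, msg string) bool {
	if err == nil {
		return false
	}

	return strings.Contains(
		strings.ToLower(err.Error()),
		strings.ToLower(msg))
}

func isErrSiteNotFound(err error) bool {
	return hasErrorMessage(err, requestedSiteCouldNotBeFound)
}

func main() {
	err := errors.New("REQUESTED SITE COULD NOT BE FOUND")
	fmt.Println(isErrSiteNotFound(err)) // true
}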

View File

@@ -628,6 +628,51 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUsersCannotBeResolved() {
 	}
 }
func (suite *GraphErrorsUnitSuite) TestIsErrSiteCouldNotBeFound() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "non-matching oDataErr",
err: odErrMsg("InvalidRequest", "cant resolve sites"),
expect: assert.False,
},
{
name: "matching oDataErr msg",
err: odErrMsg("InvalidRequest", string(requestedSiteCouldNotBeFound)),
expect: assert.True,
},
// next two tests are to make sure the checks are case insensitive
{
name: "oDataErr uppercase",
err: odErrMsg("InvalidRequest", strings.ToUpper(string(requestedSiteCouldNotBeFound))),
expect: assert.True,
},
{
name: "oDataErr lowercase",
err: odErrMsg("InvalidRequest", strings.ToLower(string(requestedSiteCouldNotBeFound))),
expect: assert.True,
},
}
for _, test := range table {
suite.Run(test.name, func() {
test.expect(suite.T(), IsErrSiteNotFound(test.err))
})
}
}
 func (suite *GraphErrorsUnitSuite) TestIsErrCannotOpenFileAttachment() {
 	table := []struct {
 		name   string
View File

@@ -5,6 +5,7 @@ import (
 	"context"
 	"encoding/json"
 	"io"
+	"time"

 	"github.com/alcionai/clues"

@@ -16,7 +17,7 @@ import (
 var (
 	_ data.BackupCollection = &MetadataCollection{}
-	_ data.Item             = &MetadataItem{}
+	_ data.Item             = &metadataItem{}
 )

 // MetadataCollection is a simple collection that assumes all items to be
@@ -24,7 +25,7 @@ var (
 // created. This collection has no logic for lazily fetching item data.
 type MetadataCollection struct {
 	fullPath      path.Path
-	items         []MetadataItem
+	items         []metadataItem
 	statusUpdater support.StatusUpdater
 }

@@ -40,23 +41,34 @@ func NewMetadataEntry(fileName string, mData any) MetadataCollectionEntry {
 	return MetadataCollectionEntry{fileName, mData}
 }

-func (mce MetadataCollectionEntry) toMetadataItem() (MetadataItem, error) {
+func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) {
 	if len(mce.fileName) == 0 {
-		return MetadataItem{}, clues.New("missing metadata filename")
+		return metadataItem{}, clues.New("missing metadata filename")
 	}

 	if mce.data == nil {
-		return MetadataItem{}, clues.New("missing metadata")
+		return metadataItem{}, clues.New("missing metadata")
 	}

 	buf := &bytes.Buffer{}
 	encoder := json.NewEncoder(buf)

 	if err := encoder.Encode(mce.data); err != nil {
-		return MetadataItem{}, clues.Wrap(err, "serializing metadata")
+		return metadataItem{}, clues.Wrap(err, "serializing metadata")
 	}

-	return NewMetadataItem(mce.fileName, buf.Bytes()), nil
+	item, err := data.NewUnindexedPrefetchedItem(
+		io.NopCloser(buf),
+		mce.fileName,
+		time.Now())
+	if err != nil {
+		return metadataItem{}, clues.Stack(err)
+	}
+
+	return metadataItem{
+		Item: item,
+		size: int64(buf.Len()),
+	}, nil
 }

 // MakeMetadataCollection creates a metadata collection that has a file
@@ -71,7 +83,7 @@ func MakeMetadataCollection(
 		return nil, nil
 	}

-	items := make([]MetadataItem, 0, len(metadata))
+	items := make([]metadataItem, 0, len(metadata))

 	for _, md := range metadata {
 		item, err := md.toMetadataItem()
@@ -89,7 +101,7 @@ func MakeMetadataCollection(
 func NewMetadataCollection(
 	p path.Path,
-	items []MetadataItem,
+	items []metadataItem,
 	statusUpdater support.StatusUpdater,
 ) *MetadataCollection {
 	return &MetadataCollection{
@@ -148,7 +160,7 @@ func (md MetadataCollection) Items(
 		defer close(res)

 		for _, item := range md.items {
-			totalBytes += int64(len(item.data))
+			totalBytes += item.size
 			res <- item
 		}
 	}()
@@ -156,36 +168,7 @@ func (md MetadataCollection) Items(
 	return res
 }

-// MetadataItem is an in-memory data.Item implementation. MetadataItem does
-// not implement additional interfaces like data.ItemInfo, so it should only
-// be used for items with a small amount of content that don't need to be added
-// to backup details.
-//
-// Currently the expected use-case for this struct are storing metadata for a
-// backup like delta tokens or a mapping of container IDs to container paths.
-type MetadataItem struct {
-	// uuid is an ID that can be used to refer to the item.
-	uuid string
-	// data is a buffer of data that the item refers to.
-	data []byte
-}
-
-func NewMetadataItem(uuid string, itemData []byte) MetadataItem {
-	return MetadataItem{
-		uuid: uuid,
-		data: itemData,
-	}
-}
-
-func (mi MetadataItem) ID() string {
-	return mi.uuid
-}
-
-// TODO(ashmrtn): Fill in once we know how to handle this.
-func (mi MetadataItem) Deleted() bool {
-	return false
-}
-
-func (mi MetadataItem) ToReader() io.ReadCloser {
-	return io.NopCloser(bytes.NewReader(mi.data))
-}
+type metadataItem struct {
+	data.Item
+
+	size int64
+}
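metadataItem now embeds a data.Item and tracks the encoded size separately, since the generic item no longer exposes its backing buffer for length checks. A toy version of that wrapper, with a local stand-in interface instead of data.Item:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// item stands in for data.Item; metadataItem embeds it and remembers the
// encoded size so Items() can report totalBytes without re-reading.
type item interface {
	ID() string
	ToReader() io.ReadCloser
}

type simpleItem struct {
	id   string
	body []byte
}

func (s simpleItem) ID() string              { return s.id }
func (s simpleItem) ToReader() io.ReadCloser { return io.NopCloser(bytes.NewReader(s.body)) }

type metadataItem struct {
	item

	size int64
}

func main() {
	body := []byte(`{"delta":"token"}`)
	mi := metadataItem{item: simpleItem{id: "delta.json", body: body}, size: int64(len(body))}

	var totalBytes int64
	totalBytes += mi.size // the new Items() accounting path

	fmt.Println(mi.ID(), totalBytes)
}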

View File

@@ -1,9 +1,11 @@
 package graph

 import (
+	"bytes"
 	"encoding/json"
 	"io"
 	"testing"
+	"time"

 	"github.com/alcionai/clues"
 	"github.com/google/uuid"
@@ -11,6 +13,8 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

+	"github.com/alcionai/corso/src/internal/common/readers"
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/fault"
@@ -63,10 +67,21 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
 		len(itemData),
 		"Requires same number of items and data")

-	items := []MetadataItem{}
+	items := []metadataItem{}

 	for i := 0; i < len(itemNames); i++ {
-		items = append(items, NewMetadataItem(itemNames[i], itemData[i]))
+		item, err := data.NewUnindexedPrefetchedItem(
+			io.NopCloser(bytes.NewReader(itemData[i])),
+			itemNames[i],
+			time.Time{})
+		require.NoError(t, err, clues.ToCore(err))
+
+		items = append(
+			items,
+			metadataItem{
+				Item: item,
+				size: int64(len(itemData[i])),
+			})
 	}

 	p, err := path.Build(
@@ -92,7 +107,13 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
 	for s := range c.Items(ctx, fault.New(true)) {
 		gotNames = append(gotNames, s.ID())

-		buf, err := io.ReadAll(s.ToReader())
+		rr, err := readers.NewVersionedRestoreReader(s.ToReader())
+		require.NoError(t, err, clues.ToCore(err))
+
+		assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
+		assert.False(t, rr.Format().DelInFlight)
+
+		buf, err := io.ReadAll(rr)
 		if !assert.NoError(t, err, clues.ToCore(err)) {
 			continue
 		}
@@ -193,11 +214,17 @@ func (suite *MetadataCollectionUnitSuite) TestMakeMetadataCollection() {
 	for item := range col.Items(ctx, fault.New(true)) {
 		assert.Equal(t, test.metadata.fileName, item.ID())

+		rr, err := readers.NewVersionedRestoreReader(item.ToReader())
+		require.NoError(t, err, clues.ToCore(err))
+
+		assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
+		assert.False(t, rr.Format().DelInFlight)
+
 		gotMap := map[string]string{}
-		decoder := json.NewDecoder(item.ToReader())
+		decoder := json.NewDecoder(rr)
 		itemCount++

-		err := decoder.Decode(&gotMap)
+		err = decoder.Decode(&gotMap)
 		if !assert.NoError(t, err, clues.ToCore(err)) {
 			continue
 		}

View File

@@ -16,6 +16,7 @@ import (
 	"golang.org/x/exp/slices"

 	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/common/readers"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub"
@@ -573,7 +574,12 @@ func compareExchangeEmail(
 	expected map[string][]byte,
 	item data.Item,
 ) {
-	itemData, err := io.ReadAll(item.ToReader())
+	rr := versionedReadWrapper(t, item.ToReader())
+	if rr == nil {
+		return
+	}
+
+	itemData, err := io.ReadAll(rr)
 	if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 		return
 	}
@@ -600,7 +606,12 @@ func compareExchangeContact(
 	expected map[string][]byte,
 	item data.Item,
 ) {
-	itemData, err := io.ReadAll(item.ToReader())
+	rr := versionedReadWrapper(t, item.ToReader())
+	if rr == nil {
+		return
+	}
+
+	itemData, err := io.ReadAll(rr)
 	if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 		return
 	}
@@ -628,7 +639,12 @@ func compareExchangeEvent(
 	expected map[string][]byte,
 	item data.Item,
 ) {
-	itemData, err := io.ReadAll(item.ToReader())
+	rr := versionedReadWrapper(t, item.ToReader())
+	if rr == nil {
+		return
+	}
+
+	itemData, err := io.ReadAll(rr)
 	if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 		return
 	}
@@ -718,7 +734,12 @@ func compareDriveItem(
 		return false
 	}

-	buf, err := io.ReadAll(item.ToReader())
+	rr := versionedReadWrapper(t, item.ToReader())
+	if rr == nil {
+		return true
+	}
+
+	buf, err := io.ReadAll(rr)
 	if !assert.NoError(t, err, clues.ToCore(err)) {
 		return true
 	}
@@ -751,10 +772,6 @@ func compareDriveItem(
 	}

 	if isMeta {
-		var itemType *metadata.Item
-
-		assert.IsType(t, itemType, item)
-
 		var (
 			itemMeta     metadata.Metadata
 			expectedMeta metadata.Metadata
@@ -854,6 +871,29 @@ func compareDriveItem(
 	return true
 }

+// versionedReadWrapper strips out the version format header and checks it
+// meets the current standard for all service types. If it doesn't meet the
+// standard, returns nil. Else returns the versionedRestoreReader.
+func versionedReadWrapper(
+	t *testing.T,
+	reader io.ReadCloser,
+) io.ReadCloser {
+	rr, err := readers.NewVersionedRestoreReader(reader)
+	if !assert.NoError(t, err, clues.ToCore(err)) {
+		return nil
+	}
+
+	if !assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) {
+		return nil
+	}
+
+	if !assert.False(t, rr.Format().DelInFlight) {
+		return nil
+	}
+
+	return rr
+}
+
 // compareItem compares the data returned by backup with the expected data.
 // Returns true if a comparison was done else false. Bool return is mostly used
 // to exclude OneDrive permissions for the root right now.
@@ -923,30 +963,9 @@ func checkHasCollections(
 			continue
 		}

-		fp := g.FullPath()
 		loc := g.(data.LocationPather).LocationPath()
-
-		if fp.Service() == path.OneDriveService ||
-			(fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) {
-			dp, err := path.ToDrivePath(fp)
-			if !assert.NoError(t, err, clues.ToCore(err)) {
-				continue
-			}
-
-			loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...)
-		}
-
-		p, err := loc.ToDataLayerPath(
-			fp.Tenant(),
-			fp.ProtectedResource(),
-			fp.Service(),
-			fp.Category(),
-			false)
-		if !assert.NoError(t, err, clues.ToCore(err)) {
-			continue
-		}
-
-		gotNames = append(gotNames, p.String())
+		gotNames = append(gotNames, loc.String())
 	}

 	assert.ElementsMatch(t, expectedNames, gotNames, "returned collections")
@@ -967,14 +986,18 @@ func checkCollections(
 	for _, returned := range got {
 		var (
-			hasItems        bool
-			service         = returned.FullPath().Service()
-			category        = returned.FullPath().Category()
-			expectedColData = expected[returned.FullPath().String()]
-			folders         = returned.FullPath().Elements()
-			rootDir         = folders[len(folders)-1] == mci.RestoreCfg.Location
+			expectedColDataByLoc map[string][]byte
+			hasItems             bool
+			service              = returned.FullPath().Service()
+			category             = returned.FullPath().Category()
+			folders              = returned.FullPath().Elements()
+			rootDir              = folders[len(folders)-1] == mci.RestoreCfg.Location
 		)

+		if p, ok := returned.(data.LocationPather); ok {
+			expectedColDataByLoc = expected[p.LocationPath().String()]
+		}
+
 		// Need to iterate through all items even if we don't expect to find a match
 		// because otherwise we'll deadlock waiting for the status. Unexpected or
 		// missing collection paths will be reported by checkHasCollections.
@@ -994,14 +1017,14 @@ func checkCollections(
 			hasItems = true
 			gotItems++

-			if expectedColData == nil {
+			if expectedColDataByLoc == nil {
 				continue
 			}

 			if !compareItem(
 				t,
 				returned.FullPath(),
-				expectedColData,
+				expectedColDataByLoc,
 				service,
 				category,
 				item,

View File

@@ -84,6 +84,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
 		rcc,
 		ctrl.AC,
 		ctrl.backupDriveIDNames,
+		ctrl.backupSiteIDWebURL,
 		dcs,
 		deets,
 		errs,

View File

@@ -22,6 +22,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/backup/identity"
 	"github.com/alcionai/corso/src/pkg/backup/metadata"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -35,19 +36,18 @@ func ProduceBackupCollections(
 	creds account.M365Config,
 	su support.StatusUpdater,
 	errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
+) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
 	b, err := bpc.Selector.ToGroupsBackup()
 	if err != nil {
-		return nil, nil, false, clues.Wrap(err, "groupsDataCollection: parsing selector")
+		return nil, nil, clues.Wrap(err, "groupsDataCollection: parsing selector")
 	}

 	var (
 		el          = errs.Local()
 		collections = []data.BackupCollection{}
 		categories  = map[path.CategoryType]struct{}{}
 		ssmb        = prefixmatcher.NewStringSetBuilder()
-		canUsePreviousBackup bool
-		sitesPreviousPaths   = map[string]string{}
+		sitesPreviousPaths = map[string]string{}
 	)

 	ctx = clues.Add(
@@ -55,9 +55,12 @@ func ProduceBackupCollections(
 		"group_id", clues.Hide(bpc.ProtectedResource.ID()),
 		"group_name", clues.Hide(bpc.ProtectedResource.Name()))

-	group, err := ac.Groups().GetByID(ctx, bpc.ProtectedResource.ID())
+	group, err := ac.Groups().GetByID(
+		ctx,
+		bpc.ProtectedResource.ID(),
+		api.CallConfig{})
 	if err != nil {
-		return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx)
+		return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx)
 	}

 	isTeam := api.IsTeam(ctx, group)
@@ -76,12 +79,9 @@ func ProduceBackupCollections(
 		switch scope.Category().PathType() {
 		case path.LibrariesCategory:
-			// TODO(meain): Private channels get a separate SharePoint
-			// site. We should also back those up and not just the
-			// default one.
-			resp, err := ac.Groups().GetRootSite(ctx, bpc.ProtectedResource.ID())
+			sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs)
 			if err != nil {
-				return nil, nil, false, err
+				return nil, nil, err
 			}

 			siteMetadataCollection := map[string][]data.RestoreCollection{}
@@ -92,39 +92,47 @@ func ProduceBackupCollections(
 				siteMetadataCollection[siteID] = append(siteMetadataCollection[siteID], c)
 			}

-			pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName()))
-			sbpc := inject.BackupProducerConfig{
-				LastBackupVersion:   bpc.LastBackupVersion,
-				Options:             bpc.Options,
-				ProtectedResource:   pr,
-				Selector:            bpc.Selector,
-				MetadataCollections: siteMetadataCollection[ptr.Val(resp.GetId())],
-			}
-
-			bh := drive.NewGroupBackupHandler(
-				bpc.ProtectedResource.ID(),
-				ptr.Val(resp.GetId()),
-				ac.Drives(),
-				scope)
-
-			cp, err := bh.SitePathPrefix(creds.AzureTenantID)
-			if err != nil {
-				return nil, nil, false, clues.Wrap(err, "getting canonical path")
-			}
-
-			sitesPreviousPaths[ptr.Val(resp.GetId())] = cp.String()
-
-			dbcs, canUsePreviousBackup, err = site.CollectLibraries(
-				ctx,
-				sbpc,
-				bh,
-				creds.AzureTenantID,
-				ssmb,
-				su,
-				errs)
-			if err != nil {
-				el.AddRecoverable(ctx, err)
-				continue
-			}
+			for _, s := range sites {
+				pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetName()))
+				sbpc := inject.BackupProducerConfig{
+					LastBackupVersion:   bpc.LastBackupVersion,
+					Options:             bpc.Options,
+					ProtectedResource:   pr,
+					Selector:            bpc.Selector,
+					MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())],
+				}
+
+				bh := drive.NewGroupBackupHandler(
+					bpc.ProtectedResource.ID(),
+					ptr.Val(s.GetId()),
+					ac.Drives(),
+					scope)
+
+				sp, err := bh.SitePathPrefix(creds.AzureTenantID)
+				if err != nil {
+					return nil, nil, clues.Wrap(err, "getting site path")
+				}
+
+				sitesPreviousPaths[ptr.Val(s.GetId())] = sp.String()
+
+				cs, canUsePreviousBackup, err := site.CollectLibraries(
+					ctx,
+					sbpc,
+					bh,
+					creds.AzureTenantID,
+					ssmb,
+					su,
+					errs)
+				if err != nil {
+					el.AddRecoverable(ctx, err)
+					continue
+				}
+
+				if !canUsePreviousBackup {
+					dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{}))
+				}
+
+				dbcs = append(dbcs, cs...)
+			}

 		case path.ChannelMessagesCategory:
@@ -132,10 +140,12 @@ func ProduceBackupCollections(
 				continue
 			}

-			dbcs, canUsePreviousBackup, err = groups.CreateCollections(
+			bh := groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels())
+
+			cs, canUsePreviousBackup, err := groups.CreateCollections(
 				ctx,
 				bpc,
-				groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()),
+				bh,
 				creds.AzureTenantID,
 				scope,
 				su,
@@ -144,6 +154,17 @@ func ProduceBackupCollections(
 				el.AddRecoverable(ctx, err)
 				continue
 			}
+
+			if !canUsePreviousBackup {
+				tp, err := bh.PathPrefix(creds.AzureTenantID)
+				if err != nil {
+					return nil, nil, clues.Wrap(err, "getting message path")
+				}
+
+				dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{}))
+			}
+
+			dbcs = append(dbcs, cs...)
 		}

 		collections = append(collections, dbcs...)
@@ -162,7 +183,7 @@ func ProduceBackupCollections(
 			su,
 			errs)
 		if err != nil {
-			return nil, nil, false, err
+			return nil, nil, err
 		}

 		collections = append(collections, baseCols...)
@@ -175,12 +196,12 @@ func ProduceBackupCollections(
 		sitesPreviousPaths,
 		su)
 	if err != nil {
-		return nil, nil, false, err
+		return nil, nil, err
 	}

 	collections = append(collections, md)

-	return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
+	return collections, ssmb.ToReader(), el.Failure()
 }

 func getSitesMetadataCollection(
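When a category cannot reuse its previous backup, the producer now emits a tombstone collection at that category's path prefix before appending the fresh collections, invalidating previously merged entries instead of failing the whole run. A stand-alone sketch of that ordering with simplified types; NewTombstoneCollection's real signature takes a path.Path and control.Options:

package main

import "fmt"

// collection sketches the two states the producer emits: live collections
// carrying items, and a tombstone that invalidates a previous path prefix.
type collection struct {
	path      string
	tombstone bool
}

func newTombstoneCollection(prefix string) collection {
	return collection{path: prefix, tombstone: true}
}

func collect(prefix string, canUsePreviousBackup bool, live []collection) []collection {
	var out []collection

	if !canUsePreviousBackup {
		// Drop all previously-merged entries under this prefix.
		out = append(out, newTombstoneCollection(prefix))
	}

	return append(out, live...)
}

func main() {
	cols := collect(
		"tenant/groups/g1/channelMessages",
		false,
		[]collection{{path: "tenant/groups/g1/channelMessages/general"}})

	for _, c := range cols {
		fmt.Printf("%s tombstone=%v\n", c.path, c.tombstone)
	}
}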

View File

@@ -7,18 +7,15 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"

 	"github.com/alcionai/corso/src/pkg/filters"
+	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )

-type getByIDer interface {
-	GetByID(ctx context.Context, identifier string) (models.Groupable, error)
-}
-
 func IsServiceEnabled(
 	ctx context.Context,
-	gbi getByIDer,
+	gbi api.GetByIDer[models.Groupable],
 	resource string,
 ) (bool, error) {
-	resp, err := gbi.GetByID(ctx, resource)
+	resp, err := gbi.GetByID(ctx, resource, api.CallConfig{})
 	if err != nil {
 		return false, clues.Wrap(err, "getting group").WithClues(ctx)
 	}
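The handler now accepts the generic api.GetByIDer[models.Groupable] instead of a package-local interface, so mocks and other model types can share one definition. A self-contained sketch of the generic interface and a mock satisfying it, with local stand-ins for the api and models packages:

package main

import (
	"context"
	"fmt"
)

// CallConfig and GetByIDer sketch the generic lookup interface the handler
// depends on; T is whatever model the service returns.
type CallConfig struct{}

type GetByIDer[T any] interface {
	GetByID(ctx context.Context, identifier string, cc CallConfig) (T, error)
}

type group struct{ unified bool }

type mockGBI struct{ g group }

func (m mockGBI) GetByID(context.Context, string, CallConfig) (group, error) {
	return m.g, nil
}

func isServiceEnabled(ctx context.Context, gbi GetByIDer[group], resource string) (bool, error) {
	g, err := gbi.GetByID(ctx, resource, CallConfig{})
	if err != nil {
		return false, err
	}

	return g.unified, nil
}

func main() {
	ok, _ := isServiceEnabled(context.Background(), mockGBI{g: group{unified: true}}, "groupID")
	fmt.Println(ok) // true
}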

View File

@@ -12,6 +12,7 @@ import (
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )

 type EnabledUnitSuite struct {
@@ -22,14 +23,18 @@ func TestEnabledUnitSuite(t *testing.T) {
 	suite.Run(t, &EnabledUnitSuite{Suite: tester.NewUnitSuite(t)})
 }

-var _ getByIDer = mockGBI{}
+var _ api.GetByIDer[models.Groupable] = mockGBI{}

 type mockGBI struct {
 	group models.Groupable
 	err   error
 }

-func (m mockGBI) GetByID(ctx context.Context, identifier string) (models.Groupable, error) {
+func (m mockGBI) GetByID(
+	ctx context.Context,
+	identifier string,
+	_ api.CallConfig,
+) (models.Groupable, error) {
 	return m.group, m.err
 }

@@ -56,13 +61,13 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
 	table := []struct {
 		name      string
-		mock      func(context.Context) getByIDer
+		mock      func(context.Context) api.GetByIDer[models.Groupable]
 		expect    assert.BoolAssertionFunc
 		expectErr assert.ErrorAssertionFunc
 	}{
 		{
 			name: "ok",
-			mock: func(ctx context.Context) getByIDer {
+			mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
 				return mockGBI{
 					group: unified,
 				}
@@ -72,7 +77,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
 		},
 		{
 			name: "non-unified group",
-			mock: func(ctx context.Context) getByIDer {
+			mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
 				return mockGBI{
 					group: nonUnified,
 				}
@@ -82,7 +87,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
 		},
 		{
 			name: "group not found",
-			mock: func(ctx context.Context) getByIDer {
+			mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
 				return mockGBI{
 					err: graph.Stack(ctx, odErrMsg(string(graph.RequestResourceNotFound), "message")),
 				}
@@ -92,7 +97,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
 		},
 		{
 			name: "arbitrary error",
-			mock: func(ctx context.Context) getByIDer {
+			mock: func(ctx context.Context) api.GetByIDer[models.Groupable] {
 				return mockGBI{
 					err: assert.AnError,
 				}

Some files were not shown because too many files have changed in this diff.