diff --git a/.github/actions/backup-restore-test/action.yml b/.github/actions/backup-restore-test/action.yml
index 299243e6a..2603cab27 100644
--- a/.github/actions/backup-restore-test/action.yml
+++ b/.github/actions/backup-restore-test/action.yml
@@ -45,6 +45,9 @@ runs:
     shell: bash
     working-directory: src
     run: |
+      echo "---------------------------"
+      echo Backup ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       set -euo pipefail
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-${{ inputs.service }}-${{inputs.kind }}.log
       ./corso backup create '${{ inputs.service }}' \
@@ -61,6 +64,9 @@ runs:
     shell: bash
     working-directory: src
     run: |
+      echo "---------------------------"
+      echo Restore ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       set -euo pipefail
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
       ./corso restore '${{ inputs.service }}' \
@@ -85,11 +91,14 @@ runs:
       SANITY_TEST_KIND: restore
       SANITY_TEST_FOLDER: ${{ steps.restore.outputs.result }}
      SANITY_TEST_SERVICE: ${{ inputs.service }}
-      TEST_DATA: ${{ inputs.test-folder }}
-      BASE_BACKUP: ${{ inputs.base-backup }}
+      SANITY_TEST_DATA: ${{ inputs.test-folder }}
+      SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
     run: |
+      echo "---------------------------"
+      echo Sanity-Test Restore ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
-      ./sanity-test
+      ./sanity-test restore ${{ inputs.service }}

   - name: Export ${{ inputs.service }} ${{ inputs.kind }}
     if: inputs.with-export == true
     shell: bash
     working-directory: src
     run: |
+      echo "---------------------------"
+      echo Export ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       set -euo pipefail
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
       ./corso export '${{ inputs.service }}' \
@@ -116,11 +128,14 @@ runs:
       SANITY_TEST_KIND: export
       SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}
       SANITY_TEST_SERVICE: ${{ inputs.service }}
-      TEST_DATA: ${{ inputs.test-folder }}
-      BASE_BACKUP: ${{ inputs.base-backup }}
+      SANITY_TEST_DATA: ${{ inputs.test-folder }}
+      SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
     run: |
+      echo "---------------------------"
+      echo Sanity-Test Export ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
-      ./sanity-test
+      ./sanity-test export ${{ inputs.service }}

   - name: Export archive ${{ inputs.service }} ${{ inputs.kind }}
     if: inputs.with-export == true
     shell: bash
     working-directory: src
     run: |
+      echo "---------------------------"
+      echo Export Archive ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       set -euo pipefail
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
       ./corso export '${{ inputs.service }}' \
@@ -150,16 +168,22 @@ runs:
       SANITY_TEST_KIND: export
       SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}-unzipped
       SANITY_TEST_SERVICE: ${{ inputs.service }}
-      TEST_DATA: ${{ inputs.test-folder }}
-      BASE_BACKUP: ${{ inputs.base-backup }}
+      SANITY_TEST_DATA: ${{ inputs.test-folder }}
+      SANITY_BASE_BACKUP: ${{ inputs.base-backup }}
     run: |
+      echo "---------------------------"
+      echo Sanity-Test Export Archive ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
-      ./sanity-test
+      ./sanity-test export ${{ inputs.service }}

   - name: List ${{ inputs.service }} ${{ inputs.kind }}
     shell: bash
     working-directory: src
     run: |
+      echo "---------------------------"
+      echo Backup list ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       set -euo pipefail
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-${{ inputs.service }}-${{inputs.kind }}.log
       ./corso backup list ${{ inputs.service }} \
@@ -178,6 +202,9 @@ runs:
     shell: bash
     working-directory: src
     run: |
+      echo "---------------------------"
+      echo Backup List w/ Backup ${{ inputs.service }} ${{ inputs.kind }}
+      echo "---------------------------"
       set -euo pipefail
       CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-single-${{ inputs.service }}-${{inputs.kind }}.log
       ./corso backup list ${{ inputs.service }} \
@@ -193,7 +220,13 @@ runs:
         exit 1
       fi

-  # Upload the original go test output as an artifact for later review.
+  - if: always()
+    shell: bash
+    run: |
+      echo "---------------------------"
+      echo Logging Results
+      echo "---------------------------"
+
   - name: Upload test log
     if: always()
     uses: actions/upload-artifact@v3
diff --git a/.github/actions/slack-message/action.yml b/.github/actions/slack-message/action.yml
index 57091d430..d79ab6180 100644
--- a/.github/actions/slack-message/action.yml
+++ b/.github/actions/slack-message/action.yml
@@ -31,7 +31,7 @@ runs:
   - name: use url or blank val
     shell: bash
     run: |
-      echo "STEP=${{ github.action || '' }}" >> $GITHUB_ENV
+      echo "STEP=${{ env.trimmed_ref || '' }}" >> $GITHUB_ENV
       echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV
       echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV
       echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV
@@ -51,7 +51,7 @@ runs:
             "type": "section",
             "text": {
               "type": "mrkdwn",
-              "text": "${{ inputs.msg }} :: ${{ env.JOB }} - ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
+              "text": "${{ inputs.msg }}\n${{ env.JOB }} :: ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
             }
           }
         ]
diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml
index 096924dba..53a0546d5 100644
--- a/.github/workflows/sanity-test.yaml
+++ b/.github/workflows/sanity-test.yaml
@@ -181,7 +181,7 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: exchange
-          kind: initial
+          kind: first-backup
           backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"'
           restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}'
@@ -249,7 +249,7 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: onedrive
-          kind: initial
+          kind: first-backup
           backup-args: '--user "${{ env.TEST_USER }}"'
           restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}'
@@ -305,7 +305,7 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: sharepoint
-          kind: initial
+          kind: first-backup
           backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"'
           restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}'
@@ -362,12 +362,34 @@ jobs:
         uses: ./.github/actions/backup-restore-test
         with:
           service: groups
-          kind: initial
+          kind: first-backup
           backup-args: '--group "${{ vars.CORSO_M365_TEST_TEAM_ID }}"'
           test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
           log-dir: ${{ env.CORSO_LOG_DIR }}

-      # TODO: incrementals
+      # generate some more entries for the incremental check
+      # - name: Groups - Create new data (for incremental)
+      #   working-directory: ./src/cmd/factory
+      #   run: |
+      #     go run . sharepoint files \
+      #       --site ${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }} \
+      #       --user ${{ env.TEST_USER }} \
+      #       --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \
+      #       --tenant ${{ secrets.TENANT_ID }} \
+      #       --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }} \
+      #       --count 4
+
+      # - name: Groups - Incremental backup
+      #   id: groups-incremental
+      #   uses: ./.github/actions/backup-restore-test
+      #   with:
+      #     service: groups
+      #     kind: incremental
+      #     backup-args: '--site "${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }}"'
+      #     restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
+      #     test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}'
+      #     log-dir: ${{ env.CORSO_LOG_DIR }}
+      #     with-export: true

 ##########################################################################################################################################
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0e8c12dfd..5e4fca312 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
 - Increase Exchange backup performance by lazily fetching data only for items whose content changed.
 - Added `--backups` flag to delete multiple backups in `corso backup delete` command.
+- Backup now includes all sites that belong to a team, not just the root site.

 ## Fixed
 - Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.
diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go
index 71cb4595c..5d885e059 100644
--- a/src/cli/backup/backup.go
+++ b/src/cli/backup/backup.go
@@ -16,6 +16,8 @@ import (
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/backup"
+	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
@@ -48,12 +50,12 @@ func AddCommands(cmd *cobra.Command) {
 	for _, sc := range subCommandFuncs {
 		subCommand := sc()
-		flags.AddAllProviderFlags(subCommand)
-		flags.AddAllStorageFlags(subCommand)
 		backupC.AddCommand(subCommand)

 		for _, addBackupTo := range serviceCommands {
-			addBackupTo(subCommand)
+			sc := addBackupTo(subCommand)
+			flags.AddAllProviderFlags(sc)
+			flags.AddAllStorageFlags(sc)
 		}
 	}
 }
@@ -163,7 +165,7 @@ func handleDeleteCmd(cmd *cobra.Command, args []string) error {
 // standard set of selector behavior that we want used in the cli
 var defaultSelectorConfig = selectors.Config{OnlyMatchItemNames: true}

-func runBackups(
+func genericCreateCommand(
 	ctx context.Context,
 	r repository.Repositoryer,
 	serviceName string,
@@ -332,6 +334,65 @@ func genericListCommand(
 	return nil
 }

+func genericDetailsCommand(
+	cmd *cobra.Command,
+	backupID string,
+	sel selectors.Selector,
+) (*details.Details, error) {
+	ctx := cmd.Context()
+
+	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
+	if err != nil {
+		return nil, clues.Stack(err)
+	}
+
+	defer utils.CloseRepo(ctx, r)
+
+	return genericDetailsCore(
+		ctx,
+		r,
+		backupID,
+		sel,
+		rdao.Opts)
+}
+
+func genericDetailsCore(
+	ctx context.Context,
+	bg repository.BackupGetter,
+	backupID string,
+	sel selectors.Selector,
+	opts control.Options,
+) (*details.Details, error) {
+	ctx = clues.Add(ctx, "backup_id", backupID)
+
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+
+	d, _, errs := bg.GetBackupDetails(ctx, backupID)
+	// TODO: log/track recoverable errors
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
+			return nil, clues.New("no backup exists with the id " + backupID)
+		}
+
+		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
+	}
+
+	if opts.SkipReduce {
+		return d, nil
+	}
+
+	d, err := sel.Reduce(ctx, d, errs)
+	if err != nil {
+		return nil, clues.Wrap(err, "filtering backup details to selection")
+	}
+
+	return d, nil
+}
+
+// ---------------------------------------------------------------------------
+// helper funcs
+// ---------------------------------------------------------------------------
+
 func ifShow(flag string) bool {
 	return strings.ToLower(strings.TrimSpace(flag)) == "show"
 }
diff --git a/src/cli/backup/backup_test.go b/src/cli/backup/backup_test.go
new file mode 100644
index 000000000..4d70702ae
--- /dev/null
+++ b/src/cli/backup/backup_test.go
@@ -0,0 +1,68 @@
+package backup
+
+import (
+	"testing"
+
+	"github.com/alcionai/clues"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/cli/utils/testdata"
+	"github.com/alcionai/corso/src/internal/tester"
+	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
+	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/path"
+	"github.com/alcionai/corso/src/pkg/selectors"
+)
+
+type BackupUnitSuite struct {
+	tester.Suite
+}
+
+func TestBackupUnitSuite(t *testing.T) {
+	suite.Run(t, &BackupUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *BackupUnitSuite) TestGenericDetailsCore() {
+	t := suite.T()
+
+	expected := append(
+		append(
+			dtd.GetItemsForVersion(
+				t,
+				path.ExchangeService,
+				path.EmailCategory,
+				0,
+				-1),
+			dtd.GetItemsForVersion(
+				t,
+				path.ExchangeService,
+				path.EventsCategory,
+				0,
+				-1)...),
+		dtd.GetItemsForVersion(
+			t,
+			path.ExchangeService,
+			path.ContactsCategory,
+			0,
+			-1)...)
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	bg := testdata.VersionedBackupGetter{
+		Details: dtd.GetDetailsSetForVersion(t, 0),
+	}
+
+	sel := selectors.NewExchangeBackup([]string{"user-id"})
+	sel.Include(sel.AllData())
+
+	output, err := genericDetailsCore(
+		ctx,
+		bg,
+		"backup-ID",
+		sel.Selector,
+		control.DefaultOptions())
+	assert.NoError(t, err, clues.ToCore(err))
+	assert.ElementsMatch(t, expected, output.Entries)
+}
diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go
index d4f0d9534..d25cefff0 100644
--- a/src/cli/backup/exchange.go
+++ b/src/cli/backup/exchange.go
@@ -1,21 +1,15 @@
 package backup

 import (
-	"context"
-
 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"

 	"github.com/alcionai/corso/src/cli/flags"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -182,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"Exchange",
@@ -272,74 +266,31 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsExchangeCmd(cmd)
+}
+
+func runDetailsExchangeCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeExchangeOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService)
+	sel := utils.IncludeExchangeRestoreDataSelectors(opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterExchangeRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsExchangeCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}

-	ds.PrintEntries(ctx)
-
 	return nil
 }

-// runDetailsExchangeCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsExchangeCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.ExchangeOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("No backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeExchangeRestoreDataSelectors(opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterExchangeRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // ------------------------------------------------------------------------------------------------
 // backup delete
 // ------------------------------------------------------------------------------------------------
diff --git a/src/cli/backup/exchange_e2e_test.go b/src/cli/backup/exchange_e2e_test.go
index 2175a50e6..0807addc6 100644
--- a/src/cli/backup/exchange_e2e_test.go
+++ b/src/cli/backup/exchange_e2e_test.go
@@ -55,7 +55,7 @@ func (suite *NoBackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 }

 func (suite *NoBackupExchangeE2ESuite) TestExchangeBackupListCmd_noBackups() {
@@ -109,7 +109,7 @@ func (suite *BackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 }

 func (suite *BackupExchangeE2ESuite) TestExchangeBackupCmd_email() {
@@ -336,7 +336,7 @@ func (suite *PreparedBackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)

 	suite.backupOps = make(map[path.CategoryType]string)

 	var (
@@ -579,7 +579,7 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)

 	m365UserID := tconfig.M365UserID(t)
 	users := []string{m365UserID}
diff --git a/src/cli/backup/exchange_test.go b/src/cli/backup/exchange_test.go
index b04f27f07..1ed8f718e 100644
--- a/src/cli/backup/exchange_test.go
+++ b/src/cli/backup/exchange_test.go
@@ -1,8 +1,6 @@
 package backup

 import (
-	"bytes"
-	"fmt"
 	"strconv"
 	"testing"

@@ -14,11 +12,9 @@ import (

 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 )

@@ -92,76 +88,46 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
 func (suite *ExchangeUnitSuite) TestBackupCreateFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: createCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: createCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
+				"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),
+				"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
+				"--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,

-	// Test arg parsing for few args
-	args := []string{
-		exchangeServiceCommand,
-		"--" + flags.RunModeFN, flags.RunModeFlagTest,
-
-		"--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput),
-		"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput),
-
-		"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
-		"--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize,
-
-		// bool flags
-		"--" + flags.FailFastFN,
-		"--" + flags.DisableIncrementalsFN,
-		"--" + flags.ForceItemDataDownloadFN,
-		"--" + flags.DisableDeltaFN,
-		"--" + flags.EnableImmutableIDFN,
-		"--" + flags.DisableConcurrencyLimiterFN,
-	}
-
-	args = append(args, flagsTD.PreparedProviderFlags()...)
-	args = append(args, flagsTD.PreparedStorageFlags()...)
-
-	cmd.SetArgs(args)
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+				// bool flags
+				"--" + flags.FailFastFN,
+				"--" + flags.DisableIncrementalsFN,
+				"--" + flags.ForceItemDataDownloadFN,
+				"--" + flags.DisableDeltaFN,
+				"--" + flags.EnableImmutableIDFN,
+				"--" + flags.DisableConcurrencyLimiterFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	opts := utils.MakeExchangeOpts(cmd)
 	co := utils.Control()

 	assert.ElementsMatch(t, flagsTD.MailboxInput, opts.Users)
-	// no assertion for category data input
-
 	assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))
 	assert.Equal(t, flagsTD.DeltaPageSize, strconv.Itoa(int(co.DeltaPageSize)))
-
-	// bool flags
 	assert.Equal(t, control.FailFast, co.FailureHandling)
 	assert.True(t, co.ToggleFeatures.DisableIncrementals)
 	assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
 	assert.True(t, co.ToggleFeatures.DisableDelta)
 	assert.True(t, co.ToggleFeatures.ExchangeImmutableIDs)
 	assert.True(t, co.ToggleFeatures.DisableConcurrencyLimiter)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -169,36 +135,25 @@ func (suite *ExchangeUnitSuite) TestBackupListFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: listCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand, []string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: listCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedBackupListFlags(),
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedBackupListFlags(),
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	flagsTD.AssertBackupListFlags(t, cmd)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
@@ -207,41 +162,28 @@ func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: detailsCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-			"--" + flags.SkipReduceFN,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: detailsCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+				"--" + flags.SkipReduceFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	co := utils.Control()

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	assert.True(t, co.SkipReduce)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -249,36 +191,24 @@ func (suite *ExchangeUnitSuite) TestBackupDeleteFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: deleteCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addExchangeCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		exchangeServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: deleteCommand},
+		addExchangeCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+		flagsTD.WithFlags(
+			exchangeServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -434,51 +364,3 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupCreateSelectors() {
 		})
 	}
 }
-
-func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.ExchangeOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsExchangeCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadExchangeOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsExchangeCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}
diff --git a/src/cli/backup/groups.go b/src/cli/backup/groups.go
index c8be220f3..d834e5f29 100644
--- a/src/cli/backup/groups.go
+++ b/src/cli/backup/groups.go
@@ -2,7 +2,6 @@ package backup

 import (
 	"context"
-	"errors"
 	"fmt"

 	"github.com/alcionai/clues"
@@ -14,12 +13,9 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365"
 )
@@ -174,7 +170,7 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"Group",
@@ -225,74 +221,31 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsGroupsCmd(cmd)
+}
+
+func runDetailsGroupsCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeGroupsOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService)
+	sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterGroupsRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsGroupsCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}

-	ds.PrintEntries(ctx)
-
 	return nil
 }

-// runDetailsGroupsCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsGroupsCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.GroupsOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterGroupsRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // ------------------------------------------------------------------------------------------------
 // backup delete
 // ------------------------------------------------------------------------------------------------
diff --git a/src/cli/backup/groups_e2e_test.go b/src/cli/backup/groups_e2e_test.go
index 986979a4f..87ef93d2b 100644
--- a/src/cli/backup/groups_e2e_test.go
+++ b/src/cli/backup/groups_e2e_test.go
@@ -56,7 +56,7 @@ func (suite *NoBackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 }

 func (suite *NoBackupGroupsE2ESuite) TestGroupsBackupListCmd_noBackups() {
@@ -110,7 +110,7 @@ func (suite *BackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 }

 func (suite *BackupGroupsE2ESuite) TestGroupsBackupCmd_channelMessages() {
@@ -287,7 +287,7 @@ func (suite *PreparedBackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)

 	suite.backupOps = make(map[path.CategoryType]string)

 	var (
@@ -515,7 +515,7 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)

 	m365GroupID := tconfig.M365GroupID(t)
 	groups := []string{m365GroupID}
diff --git a/src/cli/backup/groups_test.go b/src/cli/backup/groups_test.go
index 8829915c4..996a9126f 100644
--- a/src/cli/backup/groups_test.go
+++ b/src/cli/backup/groups_test.go
@@ -1,7 +1,6 @@
 package backup

 import (
-	"bytes"
 	"strconv"
 	"testing"

@@ -13,6 +12,7 @@ import (

 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
@@ -128,70 +128,38 @@ func (suite *GroupsUnitSuite) TestValidateGroupsBackupCreateFlags() {
 func (suite *GroupsUnitSuite) TestBackupCreateFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: createCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: createCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	// Test arg parsing for few args
-	args := []string{
-		groupsServiceCommand,
-		"--" + flags.RunModeFN, flags.RunModeFlagTest,
-
-		"--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
-		"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),
-
-		"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
-
-		// bool flags
-		"--" + flags.FailFastFN,
-		"--" + flags.DisableIncrementalsFN,
-		"--" + flags.ForceItemDataDownloadFN,
-		"--" + flags.DisableDeltaFN,
-	}
-
-	args = append(args, flagsTD.PreparedProviderFlags()...)
-	args = append(args, flagsTD.PreparedStorageFlags()...)
-
-	cmd.SetArgs(args)
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	flagsTD.WithFlags(
+		groupsServiceCommand,
+		[]string{
+			"--" + flags.RunModeFN, flags.RunModeFlagTest,
+			"--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput),
+			"--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput),
+			"--" + flags.FetchParallelismFN, flagsTD.FetchParallelism,
+			"--" + flags.FailFastFN,
+			"--" + flags.DisableIncrementalsFN,
+			"--" + flags.ForceItemDataDownloadFN,
+			"--" + flags.DisableDeltaFN,
+		},
+		flagsTD.PreparedProviderFlags(),
+		flagsTD.PreparedStorageFlags()))

 	opts := utils.MakeGroupsOpts(cmd)
 	co := utils.Control()

 	assert.ElementsMatch(t, flagsTD.GroupsInput, opts.Groups)
-	// no assertion for category data input
-
 	assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch))
-
-	// bool flags
 	assert.Equal(t, control.FailFast, co.FailureHandling)
 	assert.True(t, co.ToggleFeatures.DisableIncrementals)
 	assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
 	assert.True(t, co.ToggleFeatures.DisableDelta)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -199,37 +167,25 @@ func (suite *GroupsUnitSuite) TestBackupListFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: listCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: listCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedBackupListFlags(),
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	flagsTD.WithFlags(
+		groupsServiceCommand,
+		[]string{
+			"--" + flags.RunModeFN, flags.RunModeFlagTest,
+			"--" + flags.BackupFN, flagsTD.BackupInput,
+		},
+		flagsTD.PreparedBackupListFlags(),
+		flagsTD.PreparedProviderFlags(),
+		flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	flagsTD.AssertBackupListFlags(t, cmd)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
@@ -238,41 +194,28 @@ func (suite *GroupsUnitSuite) TestBackupDetailsFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: detailsCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-			"--" + flags.SkipReduceFN,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: detailsCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	flagsTD.WithFlags(
+		groupsServiceCommand,
+		[]string{
+			"--" + flags.RunModeFN, flags.RunModeFlagTest,
+			"--" + flags.BackupFN, flagsTD.BackupInput,
+			"--" + flags.SkipReduceFN,
+		},
+		flagsTD.PreparedProviderFlags(),
+		flagsTD.PreparedStorageFlags()))

 	co := utils.Control()

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	assert.True(t, co.SkipReduce)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -280,48 +223,24 @@ func (suite *GroupsUnitSuite) TestBackupDeleteFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: deleteCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addGroupsCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		groupsServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: deleteCommand},
+		addGroupsCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	// Test arg parsing for few args
-	args := []string{
-		groupsServiceCommand,
-		"--" + flags.RunModeFN, flags.RunModeFlagTest,
-		"--" + flags.BackupFN, flagsTD.BackupInput,
-	}
-
-	args = append(args, flagsTD.PreparedProviderFlags()...)
-	args = append(args, flagsTD.PreparedStorageFlags()...)
-
-	cmd.SetArgs(args)
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+	flagsTD.WithFlags(
+		groupsServiceCommand,
+		[]string{
+			"--" + flags.RunModeFN, flags.RunModeFlagTest,
+			"--" + flags.BackupFN, flagsTD.BackupInput,
+		},
+		flagsTD.PreparedProviderFlags(),
+		flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
diff --git a/src/cli/backup/helpers_test.go b/src/cli/backup/helpers_test.go
index e7a59f361..e3023f834 100644
--- a/src/cli/backup/helpers_test.go
+++ b/src/cli/backup/helpers_test.go
@@ -21,7 +21,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 	"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
@@ -133,6 +133,7 @@ type dependencies struct {
 func prepM365Test(
 	t *testing.T,
 	ctx context.Context, //revive:disable-line:context-as-argument
+	pst path.ServiceType,
 ) dependencies {
 	var (
 		acct     = tconfig.NewM365Account(t)
 		st       =
 		recorder = strings.Builder{}
 	)
@@ -140,11 +141,9 @@ func prepM365Test(
-	sc, err := st.StorageConfig()
+	cfg, err := st.ToS3Config()
 	require.NoError(t, err, clues.ToCore(err))

-	cfg := sc.(*storage.S3Config)
-
 	force := map[string]string{
 		tconfig.TestCfgAccountProvider: account.ProviderM365.String(),
 		tconfig.TestCfgStorageProvider: storage.ProviderS3.String(),
@@ -162,7 +161,9 @@ func prepM365Test(
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = repo.Initialize(ctx, ctrlRepo.Retention{})
+	err = repo.Initialize(ctx, repository.InitConfig{
+		Service: pst,
+	})
 	require.NoError(t, err, clues.ToCore(err))

 	return dependencies{
diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go
index fa8170f64..54d479b7c 100644
--- a/src/cli/backup/onedrive.go
+++ b/src/cli/backup/onedrive.go
@@ -1,21 +1,15 @@
 package backup

 import (
-	"context"
-
 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"

 	"github.com/alcionai/corso/src/cli/flags"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -162,7 +156,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"OneDrive",
@@ -229,74 +223,31 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsOneDriveCmd(cmd)
+}
+
+func runDetailsOneDriveCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeOneDriveOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
+	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsOneDriveCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}

-	ds.PrintEntries(ctx)
-
 	return nil
 }

-// runDetailsOneDriveCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsOneDriveCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.OneDriveOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // `corso backup delete onedrive [...]`
 func oneDriveDeleteCmd() *cobra.Command {
 	return &cobra.Command{
diff --git a/src/cli/backup/onedrive_e2e_test.go b/src/cli/backup/onedrive_e2e_test.go
index f4b2c0bdc..a2bac18b6 100644
--- a/src/cli/backup/onedrive_e2e_test.go
+++ b/src/cli/backup/onedrive_e2e_test.go
@@ -20,6 +20,7 @@ import (
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -48,7 +49,7 @@ func (suite *NoBackupOneDriveE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)
 }

 func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupListCmd_empty() {
@@ -139,7 +140,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)

 	var (
 		m365UserID = tconfig.M365UserID(t)
diff --git a/src/cli/backup/onedrive_test.go b/src/cli/backup/onedrive_test.go
index 340f598dc..8c1bb583f 100644
--- a/src/cli/backup/onedrive_test.go
+++ b/src/cli/backup/onedrive_test.go
@@ -1,8 +1,6 @@
 package backup

 import (
-	"bytes"
-	"fmt"
 	"testing"

 	"github.com/alcionai/clues"
@@ -13,11 +11,9 @@ import (

 	"github.com/alcionai/corso/src/cli/flags"
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
+	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 )

@@ -92,48 +88,33 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
 func (suite *OneDriveUnitSuite) TestBackupCreateFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: createCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
-			"--" + flags.FailFastFN,
-			"--" + flags.DisableIncrementalsFN,
-			"--" + flags.ForceItemDataDownloadFN,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: createCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput),
+				"--" + flags.FailFastFN,
+				"--" + flags.DisableIncrementalsFN,
+				"--" + flags.ForceItemDataDownloadFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	opts := utils.MakeOneDriveOpts(cmd)
 	co := utils.Control()

 	assert.ElementsMatch(t, flagsTD.UsersInput, opts.Users)
-	// no assertion for category data input
-
-	// bool flags
 	assert.Equal(t, control.FailFast, co.FailureHandling)
 	assert.True(t, co.ToggleFeatures.DisableIncrementals)
 	assert.True(t, co.ToggleFeatures.ForceItemDataDownload)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -141,37 +122,25 @@ func (suite *OneDriveUnitSuite) TestBackupListFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: listCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: listCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedBackupListFlags(),
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedBackupListFlags(),
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	flagsTD.AssertBackupListFlags(t, cmd)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
@@ -180,41 +149,28 @@ func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: detailsCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
-			"--" + flags.SkipReduceFN,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: detailsCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+				"--" + flags.SkipReduceFN,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	co := utils.Control()

-	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	assert.True(t, co.SkipReduce)
-
+	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -222,36 +178,24 @@ func (suite *OneDriveUnitSuite) TestBackupDeleteFlags() {
 	t := suite.T()

-	cmd := &cobra.Command{Use: deleteCommand}
-
-	// persistent flags not added by addCommands
-	flags.AddRunModeFlag(cmd, true)
-
-	c := addOneDriveCommands(cmd)
-	require.NotNil(t, c)
-
-	// non-persistent flags not added by addCommands
-	flags.AddAllProviderFlags(c)
-	flags.AddAllStorageFlags(c)
-
-	flagsTD.WithFlags(
-		cmd,
-		oneDriveServiceCommand,
-		[]string{
-			"--" + flags.RunModeFN, flags.RunModeFlagTest,
-			"--" + flags.BackupFN, flagsTD.BackupInput,
+	cmd := cliTD.SetUpCmdHasFlags(
+		t,
+		&cobra.Command{Use: deleteCommand},
+		addOneDriveCommands,
+		[]cliTD.UseCobraCommandFn{
+			flags.AddAllProviderFlags,
+			flags.AddAllStorageFlags,
 		},
-		flagsTD.PreparedProviderFlags(),
-		flagsTD.PreparedStorageFlags())
-
-	cmd.SetOut(new(bytes.Buffer)) // drop output
-	cmd.SetErr(new(bytes.Buffer)) // drop output
-
-	err := cmd.Execute()
-	assert.NoError(t, err, clues.ToCore(err))
+		flagsTD.WithFlags(
+			oneDriveServiceCommand,
+			[]string{
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, flagsTD.BackupInput,
+			},
+			flagsTD.PreparedProviderFlags(),
+			flagsTD.PreparedStorageFlags()))

 	assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV)
-
 	flagsTD.AssertProviderFlags(t, cmd)
 	flagsTD.AssertStorageFlags(t, cmd)
 }
@@ -279,51 +223,3 @@ func (suite *OneDriveUnitSuite) TestValidateOneDriveBackupCreateFlags() {
 		})
 	}
 }
-
-func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.OneDriveOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsOneDriveCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadOneDriveOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsOneDriveCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}
diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go
index 507a4a6d2..bfeefaa54 100644
--- a/src/cli/backup/sharepoint.go
+++ b/src/cli/backup/sharepoint.go
@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"golang.org/x/exp/slices"
@@ -13,12 +12,9 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365"
 )
@@ -179,7 +175,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"SharePoint",
@@ -303,7 +299,7 @@ func deleteSharePointCmd(cmd *cobra.Command, args []string) error {
 // backup details
 // ------------------------------------------------------------------------------------------------

-// `corso backup details onedrive [...]`
+// `corso backup details sharepoint [...]`
 func sharePointDetailsCmd() *cobra.Command {
 	return &cobra.Command{
 		Use: sharePointServiceCommand,
@@ -324,70 +320,27 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsSharePointCmd(cmd)
+}
+
+func runDetailsSharePointCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeSharePointOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService)
+	sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterSharePointRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsSharePointCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}

-	ds.PrintEntries(ctx)
-
 	return nil
 }
-
-// runDetailsSharePointCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsSharePointCmd( - ctx context.Context, - r repository.BackupGetter, - backupID string, - opts utils.SharePointOpts, - skipReduce bool, -) (*details.Details, error) { - if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil { - return nil, err - } - - ctx = clues.Add(ctx, "backup_id", backupID) - - d, _, errs := r.GetBackupDetails(ctx, backupID) - // TODO: log/track recoverable errors - if errs.Failure() != nil { - if errors.Is(errs.Failure(), data.ErrNotFound) { - return nil, clues.New("no backup exists with the id " + backupID) - } - - return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") - } - - ctx = clues.Add(ctx, "details_entries", len(d.Entries)) - - if !skipReduce { - sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts) - sel.Configure(selectors.Config{OnlyMatchItemNames: true}) - utils.FilterSharePointRestoreInfoSelectors(sel, opts) - d = sel.Reduce(ctx, d, errs) - } - - return d, nil -} diff --git a/src/cli/backup/sharepoint_e2e_test.go b/src/cli/backup/sharepoint_e2e_test.go index bfb67f85a..7d7728020 100644 --- a/src/cli/backup/sharepoint_e2e_test.go +++ b/src/cli/backup/sharepoint_e2e_test.go @@ -20,6 +20,7 @@ import ( "github.com/alcionai/corso/src/internal/operations" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors/testdata" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" @@ -46,7 +47,7 @@ func (suite *NoBackupSharePointE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.SharePointService) } func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() { @@ -103,7 +104,7 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.SharePointService) var ( m365SiteID = tconfig.M365SiteID(t) diff --git a/src/cli/backup/sharepoint_test.go b/src/cli/backup/sharepoint_test.go index fd724d83b..f018a7ba2 100644 --- a/src/cli/backup/sharepoint_test.go +++ b/src/cli/backup/sharepoint_test.go @@ -1,8 +1,6 @@ package backup import ( - "bytes" - "fmt" "strings" "testing" @@ -14,12 +12,10 @@ import ( "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" - utilsTD "github.com/alcionai/corso/src/cli/utils/testdata" "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/tester" - "github.com/alcionai/corso/src/internal/version" - dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -94,51 +90,36 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { func (suite *SharePointUnitSuite) TestBackupCreateFlags() { t := suite.T() - cmd := &cobra.Command{Use: createCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - 
[]string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput), - "--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput), - "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput), - "--" + flags.FailFastFN, - "--" + flags.DisableIncrementalsFN, - "--" + flags.ForceItemDataDownloadFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: createCommand}, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput), + "--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput), + "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput), + "--" + flags.FailFastFN, + "--" + flags.DisableIncrementalsFN, + "--" + flags.ForceItemDataDownloadFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) opts := utils.MakeSharePointOpts(cmd) co := utils.Control() assert.ElementsMatch(t, []string{strings.Join(flagsTD.SiteIDInput, ",")}, opts.SiteID) assert.ElementsMatch(t, flagsTD.WebURLInput, opts.WebURL) - // no assertion for category data input - - // bool flags assert.Equal(t, control.FailFast, co.FailureHandling) assert.True(t, co.ToggleFeatures.DisableIncrementals) assert.True(t, co.ToggleFeatures.ForceItemDataDownload) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -146,37 +127,25 @@ func (suite *SharePointUnitSuite) TestBackupCreateFlags() { func (suite *SharePointUnitSuite) TestBackupListFlags() { t := suite.T() - cmd := &cobra.Command{Use: listCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: listCommand}, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedBackupListFlags(), - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedBackupListFlags(), + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertBackupListFlags(t, cmd) flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) @@ -185,41 +154,28 @@ func (suite *SharePointUnitSuite) TestBackupListFlags() { func (suite *SharePointUnitSuite) TestBackupDetailsFlags() { t := suite.T() - cmd := &cobra.Command{Use: detailsCommand} - - // persistent flags not 
added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.SkipReduceFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: detailsCommand}, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.SkipReduceFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) co := utils.Control() assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - assert.True(t, co.SkipReduce) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -227,36 +183,24 @@ func (suite *SharePointUnitSuite) TestBackupDetailsFlags() { func (suite *SharePointUnitSuite) TestBackupDeleteFlags() { t := suite.T() - cmd := &cobra.Command{Use: deleteCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: deleteCommand}, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -391,51 +335,3 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() { }) } } - -func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectors() { - for v := 0; v <= version.Backup; v++ { - suite.Run(fmt.Sprintf("version%d", v), func() { - for _, test := range utilsTD.SharePointOptionDetailLookups { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - bg := utilsTD.VersionedBackupGetter{ - Details: dtd.GetDetailsSetForVersion(t, v), - } - - output, err := runDetailsSharePointCmd( - ctx, - bg, - "backup-ID", - test.Opts(t, v), - false) - assert.NoError(t, err, clues.ToCore(err)) - assert.ElementsMatch(t, test.Expected(t, v), output.Entries) - }) - } - }) - } -} - -func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectorsBadFormats() { - for _, 
test := range utilsTD.BadSharePointOptionsFormats { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - output, err := runDetailsSharePointCmd( - ctx, - test.BackupGetter, - "backup-ID", - test.Opts(t, version.Backup), - false) - assert.Error(t, err, clues.ToCore(err)) - assert.Empty(t, output) - }) - } -} diff --git a/src/cli/config/account.go b/src/cli/config/account.go index 8d87880d9..22a481b57 100644 --- a/src/cli/config/account.go +++ b/src/cli/config/account.go @@ -54,7 +54,7 @@ func configureAccount( if matchFromConfig { providerType := vpr.GetString(account.AccountProviderTypeKey) if providerType != account.ProviderM365.String() { - return acct, clues.New("unsupported account provider: " + providerType) + return acct, clues.New("unsupported account provider: [" + providerType + "]") } if err := mustMatchConfig(vpr, m365Overrides(overrides)); err != nil { diff --git a/src/cli/config/config.go b/src/cli/config/config.go index df8342ed1..6eab83fea 100644 --- a/src/cli/config/config.go +++ b/src/cli/config/config.go @@ -279,8 +279,7 @@ func getStorageAndAccountWithViper( // possibly read the prior config from a .corso file if readFromFile { - err = vpr.ReadInConfig() - if err != nil { + if err := vpr.ReadInConfig(); err != nil { if _, ok := err.(viper.ConfigFileNotFoundError); !ok { return config, clues.Wrap(err, "reading corso config file: "+vpr.ConfigFileUsed()) } diff --git a/src/cli/config/config_test.go b/src/cli/config/config_test.go index bccc79601..5d9fc42ce 100644 --- a/src/cli/config/config_test.go +++ b/src/cli/config/config_test.go @@ -356,10 +356,9 @@ func (suite *ConfigSuite) TestReadFromFlags() { m365Config, _ := repoDetails.Account.M365Config() - sc, err := repoDetails.Storage.StorageConfig() + s3Cfg, err := repoDetails.Storage.ToS3Config() require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err)) - s3Cfg := sc.(*storage.S3Config) commonConfig, _ := repoDetails.Storage.CommonConfig() pass := commonConfig.Corso.CorsoPassphrase @@ -425,17 +424,21 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount() { err = writeRepoConfigWithViper(vpr, s3Cfg, m365, repository.Options{}, "repoid") require.NoError(t, err, "writing repo config", clues.ToCore(err)) + require.Equal( + t, + account.ProviderM365.String(), + vpr.GetString(account.AccountProviderTypeKey), + "viper should have m365 as the account provider") + err = vpr.ReadInConfig() require.NoError(t, err, "reading repo config", clues.ToCore(err)) cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, true, true, nil) require.NoError(t, err, "getting storage and account from config", clues.ToCore(err)) - sc, err := cfg.Storage.StorageConfig() + readS3Cfg, err := cfg.Storage.ToS3Config() require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err)) - readS3Cfg := sc.(*storage.S3Config) - assert.Equal(t, readS3Cfg.Bucket, s3Cfg.Bucket) assert.Equal(t, readS3Cfg.Endpoint, s3Cfg.Endpoint) assert.Equal(t, readS3Cfg.Prefix, s3Cfg.Prefix) @@ -482,11 +485,9 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount_noFileOnlyOverride cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, false, true, overrides) require.NoError(t, err, "getting storage and account from config", clues.ToCore(err)) - sc, err := cfg.Storage.StorageConfig() + readS3Cfg, err := cfg.Storage.ToS3Config() require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err)) - readS3Cfg := sc.(*storage.S3Config) - 
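
[Editor's note — these config_test.go hunks, and the repo and restore files later in the diff, all make the same substitution: the two-step `StorageConfig()` call plus `sc.(*storage.S3Config)` type assertion becomes a typed `ToS3Config()` accessor. The accessor's body isn't shown in this diff; a minimal sketch, assuming it simply wraps the existing getter (the method layout and error text are this note's guesses):

// ToS3Config (sketch): typed accessor replacing the repeated inline
// type assertion at call sites.
func (s Storage) ToS3Config() (*S3Config, error) {
	sc, err := s.StorageConfig()
	if err != nil {
		return nil, clues.Wrap(err, "getting storage config")
	}

	s3Cfg, ok := sc.(*S3Config)
	if !ok {
		return nil, clues.New("storage config is not an s3 config")
	}

	return s3Cfg, nil
}

With this shape, a mismatched provider surfaces as an error rather than a panic from a failed assertion; `ToFilesystemConfig()` below presumably follows the same pattern.]
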
assert.Equal(t, readS3Cfg.Bucket, bkt) assert.Equal(t, cfg.RepoID, "") assert.Equal(t, readS3Cfg.Endpoint, end) diff --git a/src/cli/export/export.go b/src/cli/export/export.go index db48f466a..8415caea3 100644 --- a/src/cli/export/export.go +++ b/src/cli/export/export.go @@ -27,11 +27,11 @@ var exportCommands = []func(cmd *cobra.Command) *cobra.Command{ // AddCommands attaches all `corso export * *` commands to the parent. func AddCommands(cmd *cobra.Command) { subCommand := exportCmd() - flags.AddAllStorageFlags(subCommand) cmd.AddCommand(subCommand) for _, addExportTo := range exportCommands { - addExportTo(subCommand) + sc := addExportTo(subCommand) + flags.AddAllStorageFlags(sc) } } diff --git a/src/cli/export/groups_test.go b/src/cli/export/groups_test.go index 0f53bb6f8..3b75f0252 100644 --- a/src/cli/export/groups_test.go +++ b/src/cli/export/groups_test.go @@ -1,17 +1,15 @@ package export import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,55 +37,41 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: exportCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - flagsTD.RestoreDestination, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - - "--" + flags.FormatFN, flagsTD.FormatType, - - // bool flags - "--" + flags.ArchiveFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + groupsServiceCommand, + []string{ + flagsTD.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.FormatFN, flagsTD.FormatType, + "--" + flags.ArchiveFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeGroupsOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive) assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format) - flagsTD.AssertStorageFlags(t, cmd) }) } diff --git a/src/cli/export/onedrive_test.go b/src/cli/export/onedrive_test.go index 
2049234ae..0afe6c437 100644 --- a/src/cli/export/onedrive_test.go +++ b/src/cli/export/onedrive_test.go @@ -1,17 +1,15 @@ package export import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,67 +37,55 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: exportCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - []string{ - flagsTD.RestoreDestination, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - - "--" + flags.FormatFN, flagsTD.FormatType, - - // bool flags - "--" + flags.ArchiveFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + flagsTD.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output + "--" + flags.FormatFN, flagsTD.FormatType, - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + // bool flags + "--" + flags.ArchiveFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) + + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeOneDriveOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) assert.Equal(t, flagsTD.FileCreatedAfterInput, 
opts.FileCreatedAfter) assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.Equal(t, flagsTD.CorsoPassphrase, flags.CorsoPassphraseFV) - flagsTD.AssertStorageFlags(t, cmd) }) } diff --git a/src/cli/export/sharepoint_test.go b/src/cli/export/sharepoint_test.go index affb060e1..4850173ca 100644 --- a/src/cli/export/sharepoint_test.go +++ b/src/cli/export/sharepoint_test.go @@ -1,17 +1,15 @@ package export import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,63 +37,50 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: exportCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - flagsTD.RestoreDestination, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.LibraryFN, flagsTD.LibraryInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), - "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), - "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), - "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), - - "--" + flags.FormatFN, flagsTD.FormatType, - - // bool flags - "--" + flags.ArchiveFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + flagsTD.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.LibraryFN, flagsTD.LibraryInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, 
flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), + "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), + "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), + "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), + "--" + flags.FormatFN, flagsTD.FormatType, + "--" + flags.ArchiveFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeSharePointOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.LibraryInput, opts.Library) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) @@ -103,16 +88,12 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem) assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder) - assert.ElementsMatch(t, flagsTD.PageInput, opts.Page) assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder) - assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive) assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format) - flagsTD.AssertStorageFlags(t, cmd) }) } diff --git a/src/cli/flags/testdata/backup_list.go b/src/cli/flags/testdata/backup_list.go index 911a6b450..82b08646f 100644 --- a/src/cli/flags/testdata/backup_list.go +++ b/src/cli/flags/testdata/backup_list.go @@ -3,9 +3,10 @@ package testdata import ( "testing" - "github.com/alcionai/corso/src/cli/flags" "github.com/spf13/cobra" "gotest.tools/v3/assert" + + "github.com/alcionai/corso/src/cli/flags" ) func PreparedBackupListFlags() []string { diff --git a/src/cli/flags/testdata/flags.go b/src/cli/flags/testdata/flags.go index c8339cf73..7dec134f4 100644 --- a/src/cli/flags/testdata/flags.go +++ b/src/cli/flags/testdata/flags.go @@ -86,7 +86,7 @@ var ( DisableConcurrencyLimiter = true ) -func WithFlags( +func WithFlags2( cc *cobra.Command, command string, flagSets ...[]string, @@ -99,3 +99,18 @@ func WithFlags( cc.SetArgs(args) } + +func WithFlags( + command string, + flagSets ...[]string, +) func(*cobra.Command) { + return func(cc *cobra.Command) { + args := []string{command} + + for _, sl := range flagSets { + args = append(args, sl...) + } + + cc.SetArgs(args) + } +} diff --git a/src/cli/repo/filesystem.go b/src/cli/repo/filesystem.go index ef03d3657..f6a495f21 100644 --- a/src/cli/repo/filesystem.go +++ b/src/cli/repo/filesystem.go @@ -85,7 +85,7 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error { opt := utils.ControlWithConfig(cfg) // Retention is not supported for filesystem repos. 
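
[Editor's note — in the cli/flags/testdata hunk above, `WithFlags` becomes an option constructor returning a `func(*cobra.Command)`, with the old mutating form kept as `WithFlags2` for callers not yet migrated. That closure shape is what lets every unit test in this diff collapse its command setup, flag registration, and execution boilerplate into one `cliTD.SetUpCmdHasFlags` call. The helper itself is defined outside this diff; a rough sketch assembled from the removed boilerplate and the call sites (parameter names and assertion details are assumptions):

// SetUpCmdHasFlags (sketch): shared harness for the CLI flag tests.
func SetUpCmdHasFlags(
	t *testing.T,
	parent *cobra.Command,
	addCommands func(*cobra.Command) *cobra.Command,
	fns []UseCobraCommandFn, // e.g. flags.AddAllProviderFlags
	withFlags func(*cobra.Command),
) *cobra.Command {
	// persistent flags not added by addCommands
	flags.AddRunModeFlag(parent, true)

	c := addCommands(parent)
	require.NotNil(t, c)

	// non-persistent flags not added by addCommands
	for _, fn := range fns {
		fn(c)
	}

	// apply the args and flag values built by flagsTD.WithFlags
	withFlags(parent)

	parent.SetOut(new(bytes.Buffer)) // drop output
	parent.SetErr(new(bytes.Buffer)) // drop output

	err := parent.Execute()
	require.NoError(t, err, clues.ToCore(err))

	return parent
}

Here `UseCobraCommandFn` would be `func(*cobra.Command)`, matching `flags.AddAllProviderFlags` and `flags.AddAllStorageFlags` as the tests pass them.]
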
- retention := ctrlRepo.Retention{} + retentionOpts := ctrlRepo.Retention{} // SendStartCorsoEvent uses distinct ID as tenant ID because repoID is still not generated utils.SendStartCorsoEvent( ctx, cfg.Storage, cfg.Account.ID(), "init", cfg.Account.ID(), opt) - sc, err := cfg.Storage.StorageConfig() + storageCfg, err := cfg.Storage.ToFilesystemConfig() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration")) } - storageCfg := sc.(*storage.FilesystemConfig) - m365, err := cfg.Account.M365Config() if err != nil { return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config")) } @@ -118,19 +116,27 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller")) } - if err = r.Initialize(ctx, retention); err != nil { + ric := repository.InitConfig{RetentionOpts: retentionOpts} + + if err = r.Initialize(ctx, ric); err != nil { if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) { return nil } - return Only(ctx, clues.Wrap(err, "Failed to initialize a new filesystem repository")) + return Only(ctx, clues.Stack(ErrInitializingRepo, err)) } defer utils.CloseRepo(ctx, r) Infof(ctx, "Initialized a repository at path %s", storageCfg.Path) - if err = config.WriteRepoConfig(ctx, sc, m365, opt.Repo, r.GetID()); err != nil { + err = config.WriteRepoConfig( + ctx, + storageCfg, + m365, + opt.Repo, + r.GetID()) + if err != nil { return Only(ctx, clues.Wrap(err, "Failed to write repository configuration")) } @@ -181,13 +187,11 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error { repoID = events.RepoIDNotFound } - sc, err := cfg.Storage.StorageConfig() + storageCfg, err := cfg.Storage.ToFilesystemConfig() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration")) } - storageCfg := sc.(*storage.FilesystemConfig) - m365, err := cfg.Account.M365Config() if err != nil { return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config")) } @@ -205,15 +209,21 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to create a repository controller")) } - if err := r.Connect(ctx); err != nil { - return Only(ctx, clues.Wrap(err, "Failed to connect to the filesystem repository")) + if err := r.Connect(ctx, repository.ConnConfig{}); err != nil { + return Only(ctx, clues.Stack(ErrConnectingRepo, err)) } defer utils.CloseRepo(ctx, r) Infof(ctx, "Connected to repository at path %s", storageCfg.Path) - if err = config.WriteRepoConfig(ctx, sc, m365, opts.Repo, r.GetID()); err != nil { + err = config.WriteRepoConfig( + ctx, + storageCfg, + m365, + opts.Repo, + r.GetID()) + if err != nil { return Only(ctx, clues.Wrap(err, "Failed to write repository configuration")) } diff --git a/src/cli/repo/filesystem_e2e_test.go b/src/cli/repo/filesystem_e2e_test.go index 514d0120b..faeb5a5b1 100644 --- a/src/cli/repo/filesystem_e2e_test.go +++ b/src/cli/repo/filesystem_e2e_test.go @@ -16,7 +16,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/storage" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" @@ -56,9 +55,8 @@ func (suite *FilesystemE2ESuite) TestInitFilesystemCmd() { 
st := storeTD.NewFilesystemStorage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToFilesystemConfig() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.FilesystemConfig) force := map[string]string{ tconfig.TestCfgStorageProvider: storage.ProviderFilesystem.String(), @@ -113,9 +111,8 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() { defer flush() st := storeTD.NewFilesystemStorage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToFilesystemConfig() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.FilesystemConfig) force := map[string]string{ tconfig.TestCfgAccountProvider: account.ProviderM365.String(), @@ -134,13 +131,13 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() { // init the repo first r, err := repository.New( ctx, - account.Account{}, + tconfig.NewM365Account(t), st, control.DefaultOptions(), repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, repository.InitConfig{}) require.NoError(t, err, clues.ToCore(err)) // then test it diff --git a/src/cli/repo/repo.go b/src/cli/repo/repo.go index 53952da0c..c43fef744 100644 --- a/src/cli/repo/repo.go +++ b/src/cli/repo/repo.go @@ -21,6 +21,11 @@ const ( maintenanceCommand = "maintenance" ) +var ( + ErrConnectingRepo = clues.New("connecting repository") + ErrInitializingRepo = clues.New("initializing repository") +) + var repoCommands = []func(cmd *cobra.Command) *cobra.Command{ addS3Commands, addFilesystemCommands, diff --git a/src/cli/repo/s3.go b/src/cli/repo/s3.go index 25cbd7079..1181209c9 100644 --- a/src/cli/repo/s3.go +++ b/src/cli/repo/s3.go @@ -116,13 +116,11 @@ func initS3Cmd(cmd *cobra.Command, args []string) error { cfg.Account.ID(), opt) - sc, err := cfg.Storage.StorageConfig() + s3Cfg, err := cfg.Storage.ToS3Config() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration")) } - s3Cfg := sc.(*storage.S3Config) - if strings.HasPrefix(s3Cfg.Endpoint, "http://") || strings.HasPrefix(s3Cfg.Endpoint, "https://") { invalidEndpointErr := "endpoint doesn't support specifying protocol. 
" + "pass --disable-tls flag to use http:// instead of default https://" @@ -145,12 +143,14 @@ func initS3Cmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller")) } - if err = r.Initialize(ctx, retentionOpts); err != nil { + ric := repository.InitConfig{RetentionOpts: retentionOpts} + + if err = r.Initialize(ctx, ric); err != nil { if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) { return nil } - return Only(ctx, clues.Wrap(err, "Failed to initialize a new S3 repository")) + return Only(ctx, clues.Stack(ErrInitializingRepo, err)) } defer utils.CloseRepo(ctx, r) @@ -199,13 +199,11 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error { repoID = events.RepoIDNotFound } - sc, err := cfg.Storage.StorageConfig() + s3Cfg, err := cfg.Storage.ToS3Config() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration")) } - s3Cfg := sc.(*storage.S3Config) - m365, err := cfg.Account.M365Config() if err != nil { return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config")) @@ -230,8 +228,8 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to create a repository controller")) } - if err := r.Connect(ctx); err != nil { - return Only(ctx, clues.Wrap(err, "Failed to connect to the S3 repository")) + if err := r.Connect(ctx, repository.ConnConfig{}); err != nil { + return Only(ctx, clues.Stack(ErrConnectingRepo, err)) } defer utils.CloseRepo(ctx, r) diff --git a/src/cli/repo/s3_e2e_test.go b/src/cli/repo/s3_e2e_test.go index 99987985c..b8fb335b6 100644 --- a/src/cli/repo/s3_e2e_test.go +++ b/src/cli/repo/s3_e2e_test.go @@ -8,15 +8,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "golang.org/x/exp/maps" "github.com/alcionai/corso/src/cli" "github.com/alcionai/corso/src/cli/config" cliTD "github.com/alcionai/corso/src/cli/testdata" + "github.com/alcionai/corso/src/internal/common/str" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/storage" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" @@ -64,9 +65,8 @@ func (suite *S3E2ESuite) TestInitS3Cmd() { st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) if !test.hasConfigFile { @@ -102,10 +102,9 @@ func (suite *S3E2ESuite) TestInitMultipleTimes() { defer flush() st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() - require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) + cfg, err := st.ToS3Config() + require.NoError(t, err, clues.ToCore(err)) vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) @@ -134,11 +133,9 @@ func (suite *S3E2ESuite) TestInitS3Cmd_missingBucket() { st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) - force := map[string]string{ tconfig.TestCfgBucket: "", } @@ -189,9 +186,9 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { defer 
flush() st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() + + cfg, err := st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) force := map[string]string{ tconfig.TestCfgAccountProvider: account.ProviderM365.String(), @@ -210,13 +207,13 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { // init the repo first r, err := repository.New( ctx, - account.Account{}, + tconfig.NewM365Account(t), st, control.DefaultOptions(), repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, repository.InitConfig{}) require.NoError(t, err, clues.ToCore(err)) // then test it @@ -234,60 +231,65 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { } } -func (suite *S3E2ESuite) TestConnectS3Cmd_BadBucket() { - t := suite.T() - ctx, flush := tester.NewContext(t) +func (suite *S3E2ESuite) TestConnectS3Cmd_badInputs() { + table := []struct { + name string + bucket string + prefix string + expectErr func(t *testing.T, err error) + }{ + { + name: "bucket", + bucket: "wrong", + expectErr: func(t *testing.T, err error) { + assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err)) + }, + }, + { + name: "prefix", + prefix: "wrong", + expectErr: func(t *testing.T, err error) { + assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err)) + }, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() - defer flush() + ctx, flush := tester.NewContext(t) + defer flush() - st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() - require.NoError(t, err, clues.ToCore(err)) + st := storeTD.NewPrefixedS3Storage(t) + cfg, err := st.ToS3Config() + require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) + bucket := str.First(test.bucket, cfg.Bucket) + prefix := str.First(test.prefix, cfg.Prefix) - vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) + over := map[string]string{} + acct := tconfig.NewM365Account(t) - ctx = config.SetViper(ctx, vpr) + maps.Copy(over, acct.Config) + over[account.AccountProviderTypeKey] = account.ProviderM365.String() + over[storage.StorageProviderTypeKey] = storage.ProviderS3.String() - cmd := cliTD.StubRootCmd( - "repo", "connect", "s3", - "--config-file", configFP, - "--bucket", "wrong", - "--prefix", cfg.Prefix) - cli.BuildCommandTree(cmd) + vpr, configFP := tconfig.MakeTempTestConfigClone(t, over) + ctx = config.SetViper(ctx, vpr) - // run the command - err = cmd.ExecuteContext(ctx) - require.Error(t, err, clues.ToCore(err)) -} + cmd := cliTD.StubRootCmd( + "repo", "connect", "s3", + "--config-file", configFP, + "--bucket", bucket, + "--prefix", prefix) + cli.BuildCommandTree(cmd) -func (suite *S3E2ESuite) TestConnectS3Cmd_BadPrefix() { - t := suite.T() - ctx, flush := tester.NewContext(t) - - defer flush() - - st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() - require.NoError(t, err, clues.ToCore(err)) - - cfg := sc.(*storage.S3Config) - - vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) - - ctx = config.SetViper(ctx, vpr) - - cmd := cliTD.StubRootCmd( - "repo", "connect", "s3", - "--config-file", configFP, - "--bucket", cfg.Bucket, - "--prefix", "wrong") - cli.BuildCommandTree(cmd) - - // run the command - err = cmd.ExecuteContext(ctx) - require.Error(t, err, clues.ToCore(err)) + // run the command + err = cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) + test.expectErr(t, err) + }) + } } func (suite *S3E2ESuite) 
TestUpdateS3Cmd() { diff --git a/src/cli/restore/exchange_e2e_test.go b/src/cli/restore/exchange_e2e_test.go index effbf14d8..67896831b 100644 --- a/src/cli/restore/exchange_e2e_test.go +++ b/src/cli/restore/exchange_e2e_test.go @@ -20,7 +20,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -66,11 +65,9 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() { suite.acct = tconfig.NewM365Account(t) suite.st = storeTD.NewPrefixedS3Storage(t) - sc, err := suite.st.StorageConfig() + cfg, err := suite.st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) - force := map[string]string{ tconfig.TestCfgAccountProvider: account.ProviderM365.String(), tconfig.TestCfgStorageProvider: storage.ProviderS3.String(), @@ -94,7 +91,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() { repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = suite.repo.Initialize(ctx, ctrlRepo.Retention{}) + err = suite.repo.Initialize(ctx, repository.InitConfig{Service: path.ExchangeService}) require.NoError(t, err, clues.ToCore(err)) suite.backupOps = make(map[path.CategoryType]operations.BackupOperation) diff --git a/src/cli/restore/exchange_test.go b/src/cli/restore/exchange_test.go index d7ffb1b98..c16eac331 100644 --- a/src/cli/restore/exchange_test.go +++ b/src/cli/restore/exchange_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,80 +37,64 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addExchangeCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - exchangeServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - - "--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput), - "--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput), - "--" + flags.ContactNameFN, flagsTD.ContactNameInput, - - "--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput), - "--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput), - "--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput, - "--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput, - "--" + flags.EmailSenderFN, flagsTD.EmailSenderInput, - "--" + 
flags.EmailSubjectFN, flagsTD.EmailSubjectInput, - - "--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput), - "--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput), - "--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput, - "--" + flags.EventRecursFN, flagsTD.EventRecursInput, - "--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput, - "--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput, - "--" + flags.EventSubjectFN, flagsTD.EventSubjectInput, - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addExchangeCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + exchangeServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput), + "--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput), + "--" + flags.ContactNameFN, flagsTD.ContactNameInput, + "--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput), + "--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput), + "--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput, + "--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput, + "--" + flags.EmailSenderFN, flagsTD.EmailSenderInput, + "--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput, + "--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput), + "--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput), + "--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput, + "--" + flags.EventRecursFN, flagsTD.EventRecursInput, + "--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput, + "--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput, + "--" + flags.EventSubjectFN, flagsTD.EventSubjectInput, + "--" + flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeExchangeOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.ElementsMatch(t, flagsTD.ContactInput, opts.Contact) assert.ElementsMatch(t, flagsTD.ContactFldInput, opts.ContactFolder) assert.Equal(t, flagsTD.ContactNameInput, opts.ContactName) - assert.ElementsMatch(t, flagsTD.EmailInput, opts.Email) assert.ElementsMatch(t, flagsTD.EmailFldInput, opts.EmailFolder) assert.Equal(t, flagsTD.EmailReceivedAfterInput, opts.EmailReceivedAfter) assert.Equal(t, flagsTD.EmailReceivedBeforeInput, opts.EmailReceivedBefore) assert.Equal(t, flagsTD.EmailSenderInput, opts.EmailSender) assert.Equal(t, flagsTD.EmailSubjectInput, opts.EmailSubject) - assert.ElementsMatch(t, flagsTD.EventInput, opts.Event) assert.ElementsMatch(t, flagsTD.EventCalInput, opts.EventCalendar) assert.Equal(t, flagsTD.EventOrganizerInput, opts.EventOrganizer) @@ -120,11 +102,9 @@ func (suite *ExchangeUnitSuite) 
TestAddExchangeCommands() { assert.Equal(t, flagsTD.EventStartsAfterInput, opts.EventStartsAfter) assert.Equal(t, flagsTD.EventStartsBeforeInput, opts.EventStartsBefore) assert.Equal(t, flagsTD.EventSubjectInput, opts.EventSubject) - assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/restore/groups_test.go b/src/cli/restore/groups_test.go index f2045e53c..c6753170b 100644 --- a/src/cli/restore/groups_test.go +++ b/src/cli/restore/groups_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,65 +37,51 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - - "--" + flags.LibraryFN, flagsTD.LibraryInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), - "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), - "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), - "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, - - // bool flags - "--" + flags.NoPermissionsFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + groupsServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.LibraryFN, flagsTD.LibraryInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + 
flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), + "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), + "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), + "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), + "--" + flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + "--" + flags.NoPermissionsFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeGroupsOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.LibraryInput, opts.Library) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) @@ -105,14 +89,10 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() { assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - - // bool flags assert.True(t, flags.NoPermissionsFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/restore/onedrive_test.go b/src/cli/restore/onedrive_test.go index 5a94705d8..77fb49c65 100644 --- a/src/cli/restore/onedrive_test.go +++ b/src/cli/restore/onedrive_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,73 +37,56 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, 
flagsTD.BackupInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, - - // bool flags - "--" + flags.NoPermissionsFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + "--" + flags.NoPermissionsFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeOneDriveOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter) assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - - // bool flags assert.True(t, flags.NoPermissionsFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/restore/restore.go b/src/cli/restore/restore.go index 9dad4ca1c..7db7dc5a7 100644 --- a/src/cli/restore/restore.go +++ b/src/cli/restore/restore.go @@ -25,12 +25,12 @@ var restoreCommands = []func(cmd *cobra.Command) *cobra.Command{ // AddCommands attaches all `corso restore * *` commands to the parent. 
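
[Editor's note — the restore.go hunk continuing below makes the same move as export.go earlier in this diff: provider and storage flags are registered on each service subcommand returned by the add-functions, rather than once on the shared parent. A small standalone cobra illustration of the scoping rule involved (demo code, not Corso's):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "restore"}

	leaf := &cobra.Command{
		Use: "onedrive",
		RunE: func(cmd *cobra.Command, _ []string) error {
			bucket, _ := cmd.Flags().GetString("bucket")
			fmt.Println("bucket:", bucket)
			return nil
		},
	}

	// A local flag on the parent would NOT be visible to `restore onedrive`;
	// only persistent flags inherit, and those then appear in every child's
	// --help output. Registering on each leaf keeps the flag scoped and
	// explicit, which is the pattern this diff adopts.
	leaf.Flags().String("bucket", "", "s3 bucket name")

	root.AddCommand(leaf)
	root.SetArgs([]string{"onedrive", "--bucket", "demo"})

	_ = root.Execute() // prints: bucket: demo
}
]
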
func AddCommands(cmd *cobra.Command) { subCommand := restoreCmd() - flags.AddAllProviderFlags(subCommand) - flags.AddAllStorageFlags(subCommand) cmd.AddCommand(subCommand) for _, addRestoreTo := range restoreCommands { - addRestoreTo(subCommand) + sc := addRestoreTo(subCommand) + flags.AddAllProviderFlags(sc) + flags.AddAllStorageFlags(sc) } } diff --git a/src/cli/restore/sharepoint_test.go b/src/cli/restore/sharepoint_test.go index 638b03bee..ef28f399a 100644 --- a/src/cli/restore/sharepoint_test.go +++ b/src/cli/restore/sharepoint_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,64 +37,51 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.LibraryFN, flagsTD.LibraryInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), - "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), - "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), - "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, - - // bool flags - "--" + flags.NoPermissionsFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.LibraryFN, flagsTD.LibraryInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + 
flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), + "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), + "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), + "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), + "--" + flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + "--" + flags.NoPermissionsFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeSharePointOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.LibraryInput, opts.Library) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) @@ -104,20 +89,14 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem) assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder) - assert.ElementsMatch(t, flagsTD.PageInput, opts.Page) assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder) - assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - - // bool flags assert.True(t, flags.NoPermissionsFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/testdata/cli.go b/src/cli/testdata/cli.go index 1c955165f..16a983360 100644 --- a/src/cli/testdata/cli.go +++ b/src/cli/testdata/cli.go @@ -1,11 +1,20 @@ package testdata import ( + "bytes" "fmt" + "strings" + "testing" "time" + "github.com/alcionai/clues" "github.com/google/uuid" "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/internal/tester" ) // StubRootCmd builds a stub cobra command to be used as @@ -27,3 +36,82 @@ func StubRootCmd(args ...string) *cobra.Command { return c } + +type UseCobraCommandFn func(*cobra.Command) + +func SetUpCmdHasFlags( + t *testing.T, + parentCmd *cobra.Command, + addChildCommand func(*cobra.Command) *cobra.Command, + addFlags []UseCobraCommandFn, + setArgs UseCobraCommandFn, +) *cobra.Command { + parentCmd.PersistentPreRun = func(c *cobra.Command, args []string) { + t.Log("testing args:") + + for _, arg := range args { + t.Log(arg) + } + } + + // persistent flags not added by addCommands + flags.AddRunModeFlag(parentCmd, true) + + cmd := addChildCommand(parentCmd) + require.NotNil(t, cmd) + + cul := cmd.UseLine() + require.Truef( + t, + strings.HasPrefix(cul, parentCmd.Use+" "+cmd.Use), + "child command has expected usage format 'parent child', got %q", + cul) + + for _, af := range addFlags { + 
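+ // non-persistent flags (provider, storage, etc.) are not added by
+ // addCommands, so register them on the child command directly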
af(cmd) + } + + setArgs(parentCmd) + + parentCmd.SetOut(new(bytes.Buffer)) // drop output + parentCmd.SetErr(new(bytes.Buffer)) // drop output + + err := parentCmd.Execute() + assert.NoError(t, err, clues.ToCore(err)) + + return cmd +} + +type CobraRunEFn func(cmd *cobra.Command, args []string) error + +func CheckCmdChild( + t *testing.T, + cmd *cobra.Command, + expectChildCount int, + expectUse string, + expectShort string, + expectRunE CobraRunEFn, +) { + var ( + cmds = cmd.Commands() + child *cobra.Command + ) + + for _, cc := range cmds { + if cc.Use == expectUse { + child = cc + break + } + } + + require.Len( + t, + cmds, + expectChildCount, + "parent command should have the correct child command count") + + require.NotNil(t, child, "should have found expected child command") + + assert.Equal(t, expectShort, child.Short) + tester.AreSameFunc(t, expectRunE, child.RunE) +} diff --git a/src/cli/utils/utils.go b/src/cli/utils/utils.go index 2a4e3de34..2ee9ac090 100644 --- a/src/cli/utils/utils.go +++ b/src/cli/utils/utils.go @@ -78,16 +78,10 @@ func GetAccountAndConnectWithOverrides( return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller") } - if err := r.Connect(ctx); err != nil { + if err := r.Connect(ctx, repository.ConnConfig{Service: pst}); err != nil { return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository") } - // this initializes our graph api client configurations, - // including control options such as concurency limitations. - if _, err := r.ConnectToM365(ctx, pst); err != nil { - return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365") - } - rdao := RepoDetailsAndOpts{ Repo: cfg, Opts: opts, diff --git a/src/cmd/longevity_test/longevity.go b/src/cmd/longevity_test/longevity.go index ec7862191..c8e9f29cf 100644 --- a/src/cmd/longevity_test/longevity.go +++ b/src/cmd/longevity_test/longevity.go @@ -72,7 +72,7 @@ func deleteBackups( // Only supported for S3 repos currently. 
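+// It connects to the repository as of the given point in time and lists the
+// backups recorded there for the service.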
func pitrListBackups( ctx context.Context, - service path.ServiceType, + pst path.ServiceType, pitr time.Time, backupIDs []string, ) error { @@ -113,14 +113,14 @@ func pitrListBackups( return clues.Wrap(err, "creating a repo") } - err = r.Connect(ctx) + err = r.Connect(ctx, repository.ConnConfig{Service: pst}) if err != nil { return clues.Wrap(err, "connecting to the repository") } defer r.Close(ctx) - backups, err := r.BackupsByTag(ctx, store.Service(service)) + backups, err := r.BackupsByTag(ctx, store.Service(pst)) if err != nil { return clues.Wrap(err, "listing backups").WithClues(ctx) } diff --git a/src/cmd/s3checker/s3checker.go b/src/cmd/s3checker/s3checker.go index 0c42b8aa5..7aa11e79a 100644 --- a/src/cmd/s3checker/s3checker.go +++ b/src/cmd/s3checker/s3checker.go @@ -197,13 +197,11 @@ func handleCheckerCommand(cmd *cobra.Command, args []string, f flags) error { return clues.Wrap(err, "getting storage config") } - sc, err := repoDetails.Storage.StorageConfig() + cfg, err := repoDetails.Storage.ToS3Config() if err != nil { return clues.Wrap(err, "getting S3 config") } - cfg := sc.(*storage.S3Config) - endpoint := defaultS3Endpoint if len(cfg.Endpoint) > 0 { endpoint = cfg.Endpoint diff --git a/src/cmd/sanity_test/common/common.go b/src/cmd/sanity_test/common/common.go index 344d6dc19..c3a24a489 100644 --- a/src/cmd/sanity_test/common/common.go +++ b/src/cmd/sanity_test/common/common.go @@ -1,6 +1,68 @@ package common +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + type PermissionInfo struct { EntityID string Roles []string } + +const ( + sanityBaseBackup = "SANITY_BASE_BACKUP" + sanityTestData = "SANITY_TEST_DATA" + sanityTestFolder = "SANITY_TEST_FOLDER" + sanityTestService = "SANITY_TEST_SERVICE" +) + +type Envs struct { + BaseBackupFolder string + DataFolder string + FolderName string + Service string + SiteID string + StartTime time.Time + UserID string +} + +func EnvVars(ctx context.Context) Envs { + folder := strings.TrimSpace(os.Getenv(sanityTestFolder)) + startTime, _ := MustGetTimeFromName(ctx, folder) + + e := Envs{ + BaseBackupFolder: os.Getenv(sanityBaseBackup), + DataFolder: os.Getenv(sanityTestData), + FolderName: folder, + SiteID: tconfig.GetM365SiteID(ctx), + Service: os.Getenv(sanityTestService), + StartTime: startTime, + UserID: tconfig.GetM365UserID(ctx), + } + + fmt.Printf("\n-----\nenvs %+v\n-----\n", e) + + logger.Ctx(ctx).Info("envs", e) + + return e +} + +func GetAC() (api.Client, error) { + creds := account.M365Config{ + M365: credentials.GetM365(), + AzureTenantID: os.Getenv(account.AzureTenantID), + } + + return api.NewClient(creds, control.DefaultOptions()) +} diff --git a/src/cmd/sanity_test/common/filepath.go b/src/cmd/sanity_test/common/filepath.go new file mode 100644 index 000000000..fd47c5b2d --- /dev/null +++ b/src/cmd/sanity_test/common/filepath.go @@ -0,0 +1,38 @@ +package common + +import ( + "os" + "path/filepath" + "time" + + "github.com/alcionai/clues" +) + +func FilepathWalker( + folderName string, + exportFileSizes map[string]int64, + startTime time.Time, +) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return clues.Stack(err) + } + + if info.IsDir() { + return nil + } + 
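+ // record each file's size keyed by its path relative to the walk root;
+ // note that startTime is captured by value, so updates to it are only
+ // visible within this walk func, not to the caller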
+ relPath, err := filepath.Rel(folderName, path) + if err != nil { + return clues.Stack(err) + } + + exportFileSizes[relPath] = info.Size() + + if startTime.After(info.ModTime()) { + startTime = info.ModTime() + } + + return nil + } +} diff --git a/src/cmd/sanity_test/common/sanitree.go b/src/cmd/sanity_test/common/sanitree.go new file mode 100644 index 000000000..b0dc8ac29 --- /dev/null +++ b/src/cmd/sanity_test/common/sanitree.go @@ -0,0 +1,69 @@ +package common + +import ( + "context" + + "golang.org/x/exp/maps" +) + +// Sanitree is used to build out a hierarchical tree of items +// for comparison against each other. Primarily so that a restore +// can compare two subtrees easily. +type Sanitree[T any] struct { + Container T + ContainerID string + ContainerName string + // non-containers only + ContainsItems int + // name -> node + Children map[string]*Sanitree[T] +} + +func AssertEqualTrees[T any]( + ctx context.Context, + expect, other *Sanitree[T], +) { + if expect == nil && other == nil { + return + } + + Assert( + ctx, + func() bool { return expect != nil && other != nil }, + "non nil nodes", + expect, + other) + + Assert( + ctx, + func() bool { return expect.ContainerName == other.ContainerName }, + "container names match", + expect.ContainerName, + other.ContainerName) + + Assert( + ctx, + func() bool { return expect.ContainsItems == other.ContainsItems }, + "count of items in container matches", + expect.ContainsItems, + other.ContainsItems) + + Assert( + ctx, + func() bool { return len(expect.Children) == len(other.Children) }, + "count of child containers matches", + len(expect.Children), + len(other.Children)) + + for name, s := range expect.Children { + ch, ok := other.Children[name] + Assert( + ctx, + func() bool { return ok }, + "found matching child container", + name, + maps.Keys(other.Children)) + + AssertEqualTrees(ctx, s, ch) + } +} diff --git a/src/cmd/sanity_test/common/utils.go b/src/cmd/sanity_test/common/utils.go index e14fa86c6..89ddc6711 100644 --- a/src/cmd/sanity_test/common/utils.go +++ b/src/cmd/sanity_test/common/utils.go @@ -22,7 +22,7 @@ func Assert( return } - header = "Error: " + header + header = "TEST FAILURE: " + header expected := fmt.Sprintf("* Expected: %+v", expect) got := fmt.Sprintf("* Current: %+v", current) @@ -37,7 +37,7 @@ func Assert( func Fatal(ctx context.Context, msg string, err error) { logger.CtxErr(ctx, err).Error("test failure: " + msg) - fmt.Println(msg+": ", err) + fmt.Println("TEST FAILURE: "+msg+": ", err) os.Exit(1) } diff --git a/src/cmd/sanity_test/export/groups.go b/src/cmd/sanity_test/export/groups.go new file mode 100644 index 000000000..6da5796e2 --- /dev/null +++ b/src/cmd/sanity_test/export/groups.go @@ -0,0 +1,16 @@ +package export + +import ( + "context" + + "github.com/alcionai/corso/src/cmd/sanity_test/common" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +func CheckGroupsExport( + ctx context.Context, + ac api.Client, + envs common.Envs, +) { + // TODO +} diff --git a/src/cmd/sanity_test/export/onedrive.go b/src/cmd/sanity_test/export/onedrive.go index 3d5564bcc..5e78ece04 100644 --- a/src/cmd/sanity_test/export/onedrive.go +++ b/src/cmd/sanity_test/export/onedrive.go @@ -3,28 +3,21 @@ package export import ( "context" "fmt" - "os" "path/filepath" "time" - "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" - "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/restore" 
"github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) func CheckOneDriveExport( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - userID, folderName, dataFolder string, + ac api.Client, + envs common.Envs, ) { - drive, err := client. - Users(). - ByUserId(userID). - Drive(). - Get(ctx, nil) + drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } @@ -36,37 +29,19 @@ func CheckOneDriveExport( startTime = time.Now() ) - err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error { - if err != nil { - return clues.Stack(err) - } - - if info.IsDir() { - return nil - } - - relPath, err := filepath.Rel(folderName, path) - if err != nil { - return clues.Stack(err) - } - - exportFileSizes[relPath] = info.Size() - if startTime.After(info.ModTime()) { - startTime = info.ModTime() - } - - return nil - }) + err = filepath.Walk( + envs.FolderName, + common.FilepathWalker(envs.FolderName, exportFileSizes, startTime)) if err != nil { fmt.Println("Error walking the path:", err) } _ = restore.PopulateDriveDetails( ctx, - client, + ac, ptr.Val(drive.GetId()), - folderName, - dataFolder, + envs.FolderName, + envs.DataFolder, fileSizes, map[string][]common.PermissionInfo{}, startTime) diff --git a/src/cmd/sanity_test/export/sharepoint.go b/src/cmd/sanity_test/export/sharepoint.go index 55ab8ed5c..d53236f34 100644 --- a/src/cmd/sanity_test/export/sharepoint.go +++ b/src/cmd/sanity_test/export/sharepoint.go @@ -3,28 +3,21 @@ package export import ( "context" "fmt" - "os" "path/filepath" "time" - "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" - "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/restore" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) func CheckSharePointExport( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - siteID, folderName, dataFolder string, + ac api.Client, + envs common.Envs, ) { - drive, err := client. - Sites(). - BySiteId(siteID). - Drive(). 
- Get(ctx, nil) + drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } @@ -36,37 +29,19 @@ func CheckSharePointExport( startTime = time.Now() ) - err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error { - if err != nil { - return clues.Stack(err) - } - - if info.IsDir() { - return nil - } - - relPath, err := filepath.Rel(folderName, path) - if err != nil { - return clues.Stack(err) - } - - exportFileSizes[relPath] = info.Size() - if startTime.After(info.ModTime()) { - startTime = info.ModTime() - } - - return nil - }) + err = filepath.Walk( + envs.FolderName, + common.FilepathWalker(envs.FolderName, exportFileSizes, startTime)) if err != nil { fmt.Println("Error walking the path:", err) } _ = restore.PopulateDriveDetails( ctx, - client, + ac, ptr.Val(drive.GetId()), - folderName, - dataFolder, + envs.FolderName, + envs.DataFolder, fileSizes, map[string][]common.PermissionInfo{}, startTime) diff --git a/src/cmd/sanity_test/restore/exchange.go b/src/cmd/sanity_test/restore/exchange.go index 2dc65e6e1..dd51e5b40 100644 --- a/src/cmd/sanity_test/restore/exchange.go +++ b/src/cmd/sanity_test/restore/exchange.go @@ -3,99 +3,43 @@ package restore import ( "context" "fmt" - stdpath "path" - "strings" - "time" "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/microsoftgraph/msgraph-sdk-go/users" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/pkg/filters" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // CheckEmailRestoration verifies that the emails count in restored folder is equivalent to // emails in actual m365 account func CheckEmailRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - testUser, folderName, dataFolder, baseBackupFolder string, - startTime time.Time, + ac api.Client, + envs common.Envs, ) { var ( - restoreFolder models.MailFolderable - itemCount = make(map[string]int32) - restoreItemCount = make(map[string]int32) - builder = client.Users().ByUserId(testUser).MailFolders() + folderNameToItemCount = make(map[string]int32) + folderNameToRestoreItemCount = make(map[string]int32) ) - for { - result, err := builder.Get(ctx, nil) - if err != nil { - common.Fatal(ctx, "getting mail folders", err) - } + restoredTree := buildSanitree(ctx, ac, envs.UserID, envs.FolderName) + dataTree := buildSanitree(ctx, ac, envs.UserID, envs.DataFolder) - values := result.GetValue() - - for _, v := range values { - itemName := ptr.Val(v.GetDisplayName()) - - if itemName == folderName { - restoreFolder = v - continue - } - - if itemName == dataFolder || itemName == baseBackupFolder { - // otherwise, recursively aggregate all child folders. 
- getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount) - - itemCount[itemName] = ptr.Val(v.GetTotalItemCount()) - } - } - - link, ok := ptr.ValOK(result.GetOdataNextLink()) - if !ok { - break - } - - builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter()) - } - - folderID := ptr.Val(restoreFolder.GetId()) - folderName = ptr.Val(restoreFolder.GetDisplayName()) ctx = clues.Add( ctx, - "restore_folder_id", folderID, - "restore_folder_name", folderName) + "restore_folder_id", restoredTree.ContainerID, + "restore_folder_name", restoredTree.ContainerName, + "original_folder_id", dataTree.ContainerID, + "original_folder_name", dataTree.ContainerName) - childFolder, err := client. - Users(). - ByUserId(testUser). - MailFolders(). - ByMailFolderId(folderID). - ChildFolders(). - Get(ctx, nil) - if err != nil { - common.Fatal(ctx, "getting restore folder child folders", err) - } + verifyEmailData(ctx, folderNameToRestoreItemCount, folderNameToItemCount) - for _, fld := range childFolder.GetValue() { - restoreDisplayName := ptr.Val(fld.GetDisplayName()) - - // check if folder is the data folder we loaded or the base backup to verify - // the incremental backup worked fine - if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) { - count, _ := ptr.ValOK(fld.GetTotalItemCount()) - - restoreItemCount[restoreDisplayName] = count - checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount) - } - } - - verifyEmailData(ctx, restoreItemCount, itemCount) + common.AssertEqualTrees[models.MailFolderable]( + ctx, + dataTree, + restoredTree.Children[envs.DataFolder]) } func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) { @@ -111,109 +55,71 @@ func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[ } } -// getAllSubFolder will recursively check for all subfolders and get the corresponding -// email count. -func getAllMailSubFolders( +func buildSanitree( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - testUser string, - r models.MailFolderable, - parentFolder, - dataFolder string, - messageCount map[string]int32, -) { - var ( - folderID = ptr.Val(r.GetId()) - count int32 = 99 - options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{ - QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{ - Top: &count, - }, - } - ) - - ctx = clues.Add(ctx, "parent_folder_id", folderID) - - childFolder, err := client. - Users(). - ByUserId(testUser). - MailFolders(). - ByMailFolderId(folderID). - ChildFolders(). 
- Get(ctx, options) + ac api.Client, + userID, folderName string, +) *common.Sanitree[models.MailFolderable] { + gcc, err := ac.Mail().GetContainerByName( + ctx, + userID, + api.MsgFolderRoot, + folderName) if err != nil { - common.Fatal(ctx, "getting mail subfolders", err) + common.Fatal( + ctx, + fmt.Sprintf("finding folder by name %q", folderName), + err) } - for _, child := range childFolder.GetValue() { - var ( - childDisplayName = ptr.Val(child.GetDisplayName()) - childFolderCount = ptr.Val(child.GetChildFolderCount()) - //nolint:forbidigo - fullFolderName = stdpath.Join(parentFolder, childDisplayName) - ) + mmf, ok := gcc.(models.MailFolderable) + if !ok { + common.Fatal( + ctx, + "mail folderable required", + clues.New("casting "+*gcc.GetDisplayName()+" to models.MailFolderable")) + } - if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { - messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount()) - // recursively check for subfolders - if childFolderCount > 0 { - parentFolder := fullFolderName + root := &common.Sanitree[models.MailFolderable]{ + Container: mmf, + ContainerID: ptr.Val(mmf.GetId()), + ContainerName: ptr.Val(mmf.GetDisplayName()), + ContainsItems: int(ptr.Val(mmf.GetTotalItemCount())), + Children: map[string]*common.Sanitree[models.MailFolderable]{}, + } - getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount) - } - } - } -} - -// checkAllSubFolder will recursively traverse inside the restore folder and -// verify that data matched in all subfolders -func checkAllSubFolder( - ctx context.Context, - client *msgraphsdk.GraphServiceClient, - r models.MailFolderable, - testUser, - parentFolder, - dataFolder string, - restoreMessageCount map[string]int32, -) { - var ( - folderID = ptr.Val(r.GetId()) - count int32 = 99 - options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{ - QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{ - Top: &count, - }, - } - ) - - childFolder, err := client. - Users(). - ByUserId(testUser). - MailFolders(). - ByMailFolderId(folderID). - ChildFolders(). 
- Get(ctx, options) - if err != nil { - common.Fatal(ctx, "getting mail subfolders", err) - } - - for _, child := range childFolder.GetValue() { - var ( - childDisplayName = ptr.Val(child.GetDisplayName()) - //nolint:forbidigo - fullFolderName = stdpath.Join(parentFolder, childDisplayName) - ) - - if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { - childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount()) - restoreMessageCount[fullFolderName] = childTotalCount - } - - childFolderCount := ptr.Val(child.GetChildFolderCount()) - - if childFolderCount > 0 { - parentFolder := fullFolderName - checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount) + recurseSubfolders(ctx, ac, root, userID) + + return root +} + +func recurseSubfolders( + ctx context.Context, + ac api.Client, + parent *common.Sanitree[models.MailFolderable], + userID string, +) { + childFolders, err := ac.Mail().GetContainerChildren( + ctx, + userID, + parent.ContainerID) + if err != nil { + common.Fatal(ctx, "getting subfolders", err) + } + + for _, child := range childFolders { + c := &common.Sanitree[models.MailFolderable]{ + Container: child, + ContainerID: ptr.Val(child.GetId()), + ContainerName: ptr.Val(child.GetDisplayName()), + ContainsItems: int(ptr.Val(child.GetTotalItemCount())), + Children: map[string]*common.Sanitree[models.MailFolderable]{}, + } + + parent.Children[c.ContainerName] = c + + if ptr.Val(child.GetChildFolderCount()) > 0 { + recurseSubfolders(ctx, ac, c, userID) } } } diff --git a/src/cmd/sanity_test/restore/groups.go b/src/cmd/sanity_test/restore/groups.go new file mode 100644 index 000000000..190b4481d --- /dev/null +++ b/src/cmd/sanity_test/restore/groups.go @@ -0,0 +1,16 @@ +package restore + +import ( + "context" + + "github.com/alcionai/corso/src/cmd/sanity_test/common" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +func CheckGroupsRestoration( + ctx context.Context, + ac api.Client, + envs common.Envs, +) { + // TODO +} diff --git a/src/cmd/sanity_test/restore/onedrive.go b/src/cmd/sanity_test/restore/onedrive.go index 14fa3b8cd..1efddc87d 100644 --- a/src/cmd/sanity_test/restore/onedrive.go +++ b/src/cmd/sanity_test/restore/onedrive.go @@ -7,12 +7,12 @@ import ( "time" "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "golang.org/x/exp/slices" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) const ( @@ -21,34 +21,29 @@ const ( func CheckOneDriveRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - userID, folderName, dataFolder string, - startTime time.Time, + ac api.Client, + envs common.Envs, ) { - drive, err := client. - Users(). - ByUserId(userID). - Drive(). 
- Get(ctx, nil) + drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } checkDriveRestoration( ctx, - client, + ac, path.OneDriveService, - folderName, + envs.FolderName, ptr.Val(drive.GetId()), ptr.Val(drive.GetName()), - dataFolder, - startTime, + envs.DataFolder, + envs.StartTime, false) } func checkDriveRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, service path.ServiceType, folderName, driveID, @@ -70,7 +65,7 @@ func checkDriveRestoration( restoreFolderID := PopulateDriveDetails( ctx, - client, + ac, driveID, folderName, dataFolder, @@ -78,7 +73,14 @@ func checkDriveRestoration( folderPermissions, startTime) - getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime) + getRestoredDrive( + ctx, + ac, + driveID, + restoreFolderID, + restoreFile, + restoredFolderPermissions, + startTime) checkRestoredDriveItemPermissions( ctx, @@ -105,7 +107,7 @@ func checkDriveRestoration( func PopulateDriveDetails( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, folderName, dataFolder string, fileSizes map[string]int64, folderPermissions map[string][]common.PermissionInfo, @@ -113,18 +115,12 @@ func PopulateDriveDetails( ) string { var restoreFolderID string - response, err := client. - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId("root"). - Children(). - Get(ctx, nil) + children, err := ac.Drives().GetFolderChildren(ctx, driveID, "root") if err != nil { common.Fatal(ctx, "getting drive by id", err) } - for _, driveItem := range response.GetValue() { + for _, driveItem := range children { var ( itemID = ptr.Val(driveItem.GetId()) itemName = ptr.Val(driveItem.GetName()) @@ -156,8 +152,17 @@ func PopulateDriveDetails( continue } - folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID) - getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime) + folderPermissions[itemName] = permissionIn(ctx, ac, driveID, itemID) + + getOneDriveChildFolder( + ctx, + ac, + driveID, + itemID, + itemName, + fileSizes, + folderPermissions, + startTime) } return restoreFolderID @@ -228,18 +233,18 @@ func checkRestoredDriveItemPermissions( func getOneDriveChildFolder( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, itemID, parentName string, fileSizes map[string]int64, folderPermission map[string][]common.PermissionInfo, startTime time.Time, ) { - response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil) + children, err := ac.Drives().GetFolderChildren(ctx, driveID, itemID) if err != nil { common.Fatal(ctx, "getting child folder", err) } - for _, driveItem := range response.GetValue() { + for _, driveItem := range children { var ( itemID = ptr.Val(driveItem.GetId()) itemName = ptr.Val(driveItem.GetName()) @@ -268,31 +273,33 @@ func getOneDriveChildFolder( continue } - folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID) - getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime) + folderPermission[fullName] = permissionIn(ctx, ac, driveID, itemID) + getOneDriveChildFolder( + ctx, + ac, + driveID, + itemID, + fullName, + fileSizes, + folderPermission, + startTime) } } func getRestoredDrive( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, restoreFolderID string, 
restoreFile map[string]int64, restoreFolder map[string][]common.PermissionInfo, startTime time.Time, ) { - restored, err := client. - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId(restoreFolderID). - Children(). - Get(ctx, nil) + children, err := ac.Drives().GetFolderChildren(ctx, driveID, restoreFolderID) if err != nil { common.Fatal(ctx, "getting child folder", err) } - for _, item := range restored.GetValue() { + for _, item := range children { var ( itemID = ptr.Val(item.GetId()) itemName = ptr.Val(item.GetName()) @@ -308,8 +315,16 @@ func getRestoredDrive( continue } - restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID) - getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime) + restoreFolder[itemName] = permissionIn(ctx, ac, driveID, itemID) + getOneDriveChildFolder( + ctx, + ac, + driveID, + itemID, + itemName, + restoreFile, + restoreFolder, + startTime) } } @@ -319,18 +334,12 @@ func getRestoredDrive( func permissionIn( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, itemID string, ) []common.PermissionInfo { pi := []common.PermissionInfo{} - pcr, err := client. - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId(itemID). - Permissions(). - Get(ctx, nil) + pcr, err := ac.Drives().GetItemPermission(ctx, driveID, itemID) if err != nil { common.Fatal(ctx, "getting permission", err) } diff --git a/src/cmd/sanity_test/restore/sharepoint.go b/src/cmd/sanity_test/restore/sharepoint.go index a5146d7a4..62c761dff 100644 --- a/src/cmd/sanity_test/restore/sharepoint.go +++ b/src/cmd/sanity_test/restore/sharepoint.go @@ -2,38 +2,31 @@ package restore import ( "context" - "time" - - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) func CheckSharePointRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - siteID, userID, folderName, dataFolder string, - startTime time.Time, + ac api.Client, + envs common.Envs, ) { - drive, err := client. - Sites(). - BySiteId(siteID). - Drive(). 
- Get(ctx, nil) + drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } checkDriveRestoration( ctx, - client, + ac, path.SharePointService, - folderName, + envs.FolderName, ptr.Val(drive.GetId()), ptr.Val(drive.GetName()), - dataFolder, - startTime, + envs.DataFolder, + envs.StartTime, true) } diff --git a/src/cmd/sanity_test/sanity_tests.go b/src/cmd/sanity_test/sanity_tests.go index 84bce47a0..cf47744a4 100644 --- a/src/cmd/sanity_test/sanity_tests.go +++ b/src/cmd/sanity_test/sanity_tests.go @@ -2,21 +2,40 @@ package main import ( "context" + "fmt" "os" - "strings" - "time" "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" + "github.com/spf13/cobra" + "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/export" "github.com/alcionai/corso/src/cmd/sanity_test/restore" "github.com/alcionai/corso/src/internal/m365/graph" - "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/logger" ) +// --------------------------------------------------------------------------- +// root command +// --------------------------------------------------------------------------- + +func rootCMD() *cobra.Command { + return &cobra.Command{ + Use: "sanity-test", + Short: "run the sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRoot, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Println("running", cmd.UseLine()) + }, + } +} + +func sanityTestRoot(cmd *cobra.Command, args []string) error { + return print.Only(cmd.Context(), clues.New("must specify a kind of test")) +} + func main() { ls := logger.Settings{ File: logger.GetLogFile(""), @@ -29,60 +48,226 @@ func main() { _ = log.Sync() // flush all logs in the buffer }() + // TODO: only needed for exchange graph.InitializeConcurrencyLimiter(ctx, true, 4) - adapter, err := graph.CreateAdapter( - tconfig.GetM365TenantID(ctx), - os.Getenv("AZURE_CLIENT_ID"), - os.Getenv("AZURE_CLIENT_SECRET")) - if err != nil { - common.Fatal(ctx, "creating adapter", err) - } + root := rootCMD() - var ( - client = msgraphsdk.NewGraphServiceClient(adapter) - testUser = tconfig.GetM365UserID(ctx) - testSite = tconfig.GetM365SiteID(ctx) - testKind = os.Getenv("SANITY_TEST_KIND") // restore or export (cli arg?) 
- testService = os.Getenv("SANITY_TEST_SERVICE")
- folder = strings.TrimSpace(os.Getenv("SANITY_TEST_FOLDER"))
- dataFolder = os.Getenv("TEST_DATA")
- baseBackupFolder = os.Getenv("BASE_BACKUP")
- )
+ restCMD := restoreCMD()

- ctx = clues.Add(
- ctx,
- "resource_owner", testUser,
- "service", testService,
- "sanity_restore_folder", folder)
+ restCMD.AddCommand(restoreExchangeCMD())
+ restCMD.AddCommand(restoreOneDriveCMD())
+ restCMD.AddCommand(restoreSharePointCMD())
+ restCMD.AddCommand(restoreGroupsCMD())
+ root.AddCommand(restCMD)

- logger.Ctx(ctx).Info("starting sanity test check")
+ expCMD := exportCMD()

- switch testKind {
- case "restore":
- startTime, _ := common.MustGetTimeFromName(ctx, folder)
- clues.Add(ctx, "sanity_restore_start_time", startTime.Format(time.RFC3339))
+ expCMD.AddCommand(exportOneDriveCMD())
+ expCMD.AddCommand(exportSharePointCMD())
+ expCMD.AddCommand(exportGroupsCMD())
+ root.AddCommand(expCMD)

- switch testService {
- case "exchange":
- restore.CheckEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
- case "onedrive":
- restore.CheckOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
- case "sharepoint":
- restore.CheckSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
- default:
- common.Fatal(ctx, "unknown service for restore sanity tests", nil)
- }
- case "export":
- switch testService {
- case "onedrive":
- export.CheckOneDriveExport(ctx, client, testUser, folder, dataFolder)
- case "sharepoint":
- export.CheckSharePointExport(ctx, client, testSite, folder, dataFolder)
- default:
- common.Fatal(ctx, "unknown service for export sanity tests", nil)
- }
- default:
- common.Fatal(ctx, "unknown test kind (expected restore or export)", nil)
+ if err := root.Execute(); err != nil {
+ os.Exit(1)
}
}
+
+// ---------------------------------------------------------------------------
+// restore/export command
+// ---------------------------------------------------------------------------
+
+func exportCMD() *cobra.Command {
+ return &cobra.Command{
+ Use: "export",
+ Short: "run the post-export sanity tests",
+ DisableAutoGenTag: true,
+ RunE: sanityTestExport,
+ }
+}
+
+func sanityTestExport(cmd *cobra.Command, args []string) error {
+ return print.Only(cmd.Context(), clues.New("must specify a service"))
+}
+
+func restoreCMD() *cobra.Command {
+ return &cobra.Command{
+ Use: "restore",
+ Short: "run the post-restore sanity tests",
+ DisableAutoGenTag: true,
+ RunE: sanityTestRestore,
+ }
+}
+
+func sanityTestRestore(cmd *cobra.Command, args []string) error {
+ return print.Only(cmd.Context(), clues.New("must specify a service"))
+}
+
+// ---------------------------------------------------------------------------
+// service commands - export
+// ---------------------------------------------------------------------------
+
+func exportGroupsCMD() *cobra.Command {
+ return &cobra.Command{
+ Use: "groups",
+ Short: "run the groups export sanity tests",
+ DisableAutoGenTag: true,
+ RunE: sanityTestExportGroups,
+ }
+}
+
+func sanityTestExportGroups(cmd *cobra.Command, args []string) error {
+ ctx := cmd.Context()
+ envs := common.EnvVars(ctx)
+
+ ac, err := common.GetAC()
+ if err != nil {
+ return print.Only(ctx, err)
+ }
+
+ export.CheckGroupsExport(ctx, ac, envs)
+
+ return nil
+}
+
+func exportOneDriveCMD() *cobra.Command {
+ return &cobra.Command{
+ Use: "onedrive",
+ Short: "run the onedrive export sanity tests",
+ DisableAutoGenTag: true,
+ RunE:
sanityTestExportOneDrive, + } +} + +func sanityTestExportOneDrive(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + export.CheckOneDriveExport(ctx, ac, envs) + + return nil +} + +func exportSharePointCMD() *cobra.Command { + return &cobra.Command{ + Use: "sharepoint", + Short: "run the sharepoint export sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestExportSharePoint, + } +} + +func sanityTestExportSharePoint(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + export.CheckSharePointExport(ctx, ac, envs) + + return nil +} + +// --------------------------------------------------------------------------- +// service commands - restore +// --------------------------------------------------------------------------- + +func restoreExchangeCMD() *cobra.Command { + return &cobra.Command{ + Use: "exchange", + Short: "run the exchange restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreExchange, + } +} + +func sanityTestRestoreExchange(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckEmailRestoration(ctx, ac, envs) + + return nil +} + +func restoreOneDriveCMD() *cobra.Command { + return &cobra.Command{ + Use: "onedrive", + Short: "run the onedrive restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreOneDrive, + } +} + +func sanityTestRestoreOneDrive(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckOneDriveRestoration(ctx, ac, envs) + + return nil +} + +func restoreSharePointCMD() *cobra.Command { + return &cobra.Command{ + Use: "sharepoint", + Short: "run the sharepoint restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreSharePoint, + } +} + +func sanityTestRestoreSharePoint(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckSharePointRestoration(ctx, ac, envs) + + return nil +} + +func restoreGroupsCMD() *cobra.Command { + return &cobra.Command{ + Use: "groups", + Short: "run the groups restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreGroups, + } +} + +func sanityTestRestoreGroups(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckGroupsRestoration(ctx, ac, envs) + + return nil +} diff --git a/src/go.mod b/src/go.mod index 96d163731..146e144c6 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/alcionai/clues v0.0.0-20230920212840-728ac1a1d8b8 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-xray-sdk-go v1.8.1 + github.com/aws/aws-xray-sdk-go v1.8.2 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.1 github.com/h2non/gock v1.2.0 diff --git a/src/go.sum b/src/go.sum index 054c65071..d381c9e69 100644 --- a/src/go.sum +++ b/src/go.sum @@ -71,8 +71,8 @@ 
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc=
github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
-github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
-github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
+github.com/aws/aws-xray-sdk-go v1.8.2 h1:PVxNWnQG+rAYjxsmhEN97DTO57Dipg6VS0wsu6bXUB0=
+github.com/aws/aws-xray-sdk-go v1.8.2/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
diff --git a/src/internal/common/readers/serialization_version.go b/src/internal/common/readers/serialization_version.go
new file mode 100644
index 000000000..f203c3233
--- /dev/null
+++ b/src/internal/common/readers/serialization_version.go
@@ -0,0 +1,187 @@
+package readers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "os"
+ "unsafe"
+
+ "github.com/alcionai/clues"
+)
+
+// persistedSerializationVersion is the on-disk representation of the
+// serialization version.
+//
+// The current on-disk format of this field is written in big endian. The
+// highest bit denotes whether the item is empty because it was deleted between
+// the time we told the storage about it and when we needed to get data for it.
+// The lowest two bytes are the version number. All other bits are reserved for
+// future use.
+//
+// MSB 31         30 ...  16  15 ...       0 LSB
+//     +----------+----------+----------------+
+//     | del flag | reserved | version number |
+//     +----------+----------+----------------+
+type persistedSerializationVersion = uint32
+
+// SerializationVersion is the in-memory type of the version number that gets
+// embedded in the persisted serialization version.
+//
+// Right now it's only a uint16 but we can expand it to be larger so long as the
+// expanded size doesn't clash with the flags in the high-order bits.
+type SerializationVersion uint16
+
+// DefaultSerializationVersion is the current (default) version number for all
+// services. As services evolve their storage format they should begin tracking
+// their own version numbers separate from other services.
+const DefaultSerializationVersion SerializationVersion = 1
+
+const (
+ VersionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0)))
+ delInFlightMask persistedSerializationVersion = 1 << ((VersionFormatSize * 8) - 1)
+)
+
+// SerializationFormat is a struct describing serialization format versions and
+// flags to add for this item.
+type SerializationFormat struct {
+ Version SerializationVersion
+ DelInFlight bool
+}
+
+// NewVersionedBackupReader creates a reader that injects the serialization
+// format into the first bytes of the returned data. After the format header
+// has been returned, data is returned from baseReaders in the order they're
+// passed in.
+func NewVersionedBackupReader(
+ format SerializationFormat,
+ baseReaders ...io.ReadCloser,
+) (io.ReadCloser, error) {
+ if format.DelInFlight && len(baseReaders) > 0 {
+ // This is a conservative check, but we can always loosen it later on if
+ // needed. At the moment we really don't expect any data if the item was
+ // deleted.
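+ // For reference, a deleted item therefore serializes to just the
+ // 4-byte header, e.g. version 42 with the delete flag set becomes
+ // 0x80 0x00 0x00 0x2A.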
+ return nil, clues.New("item marked deleted but has reader(s)") + } + + formattedVersion := persistedSerializationVersion(format.Version) + if format.DelInFlight { + formattedVersion |= delInFlightMask + } + + formattedBuf := make([]byte, VersionFormatSize) + binary.BigEndian.PutUint32(formattedBuf, formattedVersion) + + versionReader := io.NopCloser(bytes.NewReader(formattedBuf)) + + // Need to add readers individually because types differ. + allReaders := make([]io.Reader, 0, len(baseReaders)+1) + allReaders = append(allReaders, versionReader) + + for _, r := range baseReaders { + allReaders = append(allReaders, r) + } + + res := &versionedBackupReader{ + baseReaders: append([]io.ReadCloser{versionReader}, baseReaders...), + combined: io.MultiReader(allReaders...), + } + + return res, nil +} + +type versionedBackupReader struct { + // baseReaders is a reference to the original readers so we can close them. + baseReaders []io.ReadCloser + // combined is the reader that will return all data. + combined io.Reader +} + +func (vbr *versionedBackupReader) Read(p []byte) (int, error) { + if vbr.combined == nil { + return 0, os.ErrClosed + } + + n, err := vbr.combined.Read(p) + if err == io.EOF { + // Golang doesn't allow wrapping of EOF. If we wrap it other things start + // thinking it's an actual error. + return n, err + } + + return n, clues.Stack(err).OrNil() +} + +func (vbr *versionedBackupReader) Close() error { + if vbr.combined == nil { + return nil + } + + vbr.combined = nil + + var errs *clues.Err + + for i, r := range vbr.baseReaders { + if err := r.Close(); err != nil { + errs = clues.Stack( + errs, + clues.Wrap(err, "closing reader").With("reader_index", i)) + } + } + + vbr.baseReaders = nil + + return errs.OrNil() +} + +// NewVersionedRestoreReader wraps baseReader and provides easy access to the +// SerializationFormat info in the first bytes of the data contained in +// baseReader. +func NewVersionedRestoreReader( + baseReader io.ReadCloser, +) (*VersionedRestoreReader, error) { + versionBuf := make([]byte, VersionFormatSize) + + // Loop to account for the unlikely case where we get a short read. + for read := 0; read < VersionFormatSize; { + n, err := baseReader.Read(versionBuf[read:]) + if err != nil { + return nil, clues.Wrap(err, "reading serialization version") + } + + read += n + } + + formattedVersion := binary.BigEndian.Uint32(versionBuf) + + return &VersionedRestoreReader{ + baseReader: baseReader, + format: SerializationFormat{ + Version: SerializationVersion(formattedVersion), + DelInFlight: (formattedVersion & delInFlightMask) != 0, + }, + }, nil +} + +type VersionedRestoreReader struct { + baseReader io.ReadCloser + format SerializationFormat +} + +func (vrr *VersionedRestoreReader) Read(p []byte) (int, error) { + n, err := vrr.baseReader.Read(p) + if err == io.EOF { + // Golang doesn't allow wrapping of EOF. If we wrap it other things start + // thinking it's an actual error. 
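+ // (callers such as io.ReadAll compare err == io.EOF by identity)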
+ return n, err + } + + return n, clues.Stack(err).OrNil() +} + +func (vrr *VersionedRestoreReader) Close() error { + return clues.Stack(vrr.baseReader.Close()).OrNil() +} + +func (vrr VersionedRestoreReader) Format() SerializationFormat { + return vrr.format +} diff --git a/src/internal/common/readers/serialization_version_test.go b/src/internal/common/readers/serialization_version_test.go new file mode 100644 index 000000000..7d99c7721 --- /dev/null +++ b/src/internal/common/readers/serialization_version_test.go @@ -0,0 +1,362 @@ +package readers_test + +import ( + "bytes" + "io" + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "golang.org/x/exp/slices" + + "github.com/alcionai/corso/src/internal/common/readers" + "github.com/alcionai/corso/src/internal/tester" +) + +type shortReader struct { + maxReadLen int + io.ReadCloser +} + +func (s *shortReader) Read(p []byte) (int, error) { + toRead := s.maxReadLen + if len(p) < toRead { + toRead = len(p) + } + + return s.ReadCloser.Read(p[:toRead]) +} + +type SerializationReaderUnitSuite struct { + tester.Suite +} + +func TestSerializationReaderUnitSuite(t *testing.T) { + suite.Run(t, &SerializationReaderUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader() { + baseData := []byte("hello world") + + table := []struct { + name string + format readers.SerializationFormat + inputReaders []io.ReadCloser + + expectErr require.ErrorAssertionFunc + expectData []byte + }{ + { + name: "DeletedInFlight NoVersion NoReaders", + format: readers.SerializationFormat{ + DelInFlight: true, + }, + expectErr: require.NoError, + expectData: []byte{0x80, 0x0, 0x0, 0x0}, + }, + { + name: "DeletedInFlight NoReaders", + format: readers.SerializationFormat{ + Version: 42, + DelInFlight: true, + }, + expectErr: require.NoError, + expectData: []byte{0x80, 0x0, 0x0, 42}, + }, + { + name: "NoVersion NoReaders", + expectErr: require.NoError, + expectData: []byte{0x00, 0x0, 0x0, 0x0}, + }, + { + name: "NoReaders", + format: readers.SerializationFormat{ + Version: 42, + }, + expectErr: require.NoError, + expectData: []byte{0x00, 0x0, 0x0, 42}, + }, + { + name: "SingleReader", + format: readers.SerializationFormat{ + Version: 42, + }, + inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))}, + expectErr: require.NoError, + expectData: append([]byte{0x00, 0x0, 0x0, 42}, baseData...), + }, + { + name: "MultipleReaders", + format: readers.SerializationFormat{ + Version: 42, + }, + inputReaders: []io.ReadCloser{ + io.NopCloser(bytes.NewReader(baseData)), + io.NopCloser(bytes.NewReader(baseData)), + }, + expectErr: require.NoError, + expectData: append( + append([]byte{0x00, 0x0, 0x0, 42}, baseData...), + baseData...), + }, + // Uncomment if we expand the version to 32 bits. + //{ + // name: "VersionWithHighBitSet NoReaders Errors", + // format: readers.SerializationFormat{ + // Version: 0x80000000, + // }, + // expectErr: require.Error, + //}, + { + name: "DeletedInFlight SingleReader Errors", + format: readers.SerializationFormat{ + DelInFlight: true, + }, + inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))}, + expectErr: require.Error, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + r, err := readers.NewVersionedBackupReader( + test.format, + test.inputReaders...) 
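+ // invalid combinations (e.g. DelInFlight with attached readers) fail
+ // here at construction, before any bytes are read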
+ test.expectErr(t, err, "getting backup reader: %v", clues.ToCore(err)) + + if err != nil { + return + } + + defer func() { + err := r.Close() + assert.NoError(t, err, "closing reader: %v", clues.ToCore(err)) + }() + + buf, err := io.ReadAll(r) + require.NoError( + t, + err, + "reading serialized data: %v", + clues.ToCore(err)) + + // Need to use equal because output is order-sensitive. + assert.Equal(t, test.expectData, buf, "serialized data") + }) + } +} + +func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader_ShortReads() { + t := suite.T() + + baseData := []byte("hello world") + expectData := append( + append([]byte{0x00, 0x0, 0x0, 42}, baseData...), + baseData...) + + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData)), + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "getting backup reader: %v", clues.ToCore(err)) + + defer func() { + err := r.Close() + assert.NoError(t, err, "closing reader: %v", clues.ToCore(err)) + }() + + buf := make([]byte, len(expectData)) + r = &shortReader{ + maxReadLen: 3, + ReadCloser: r, + } + + for read := 0; ; { + n, err := r.Read(buf[read:]) + + read += n + if read >= len(buf) { + break + } + + require.NoError(t, err, "reading data: %v", clues.ToCore(err)) + } + + // Need to use equal because output is order-sensitive. + assert.Equal(t, expectData, buf, "serialized data") +} + +// TestRestoreSerializationReader checks that we can read previously serialized +// data. For simplicity, it uses the versionedBackupReader to generate the +// input. This should be relatively safe because the tests for +// versionedBackupReader do compare directly against serialized data. +func (suite *SerializationReaderUnitSuite) TestRestoreSerializationReader() { + baseData := []byte("hello world") + + table := []struct { + name string + inputReader func(*testing.T) io.ReadCloser + + expectErr require.ErrorAssertionFunc + expectVersion readers.SerializationVersion + expectDelInFlight bool + expectData []byte + }{ + { + name: "NoVersion NoReaders", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader(readers.SerializationFormat{}) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectData: []byte{}, + }, + { + name: "DeletedInFlight NoReaders", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{ + Version: 42, + DelInFlight: true, + }) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectDelInFlight: true, + expectData: []byte{}, + }, + { + name: "DeletedInFlight SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + // Need to specify the bytes manually because the backup reader won't + // allow creating something with the deleted flag and data. 
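+ // header 0x80 0x00 0x00 0x2A: del-in-flight bit set, version 42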
+ return io.NopCloser(bytes.NewReader(append( + []byte{0x80, 0x0, 0x0, 42}, + baseData...))) + }, + expectErr: require.NoError, + expectVersion: 42, + expectDelInFlight: true, + expectData: baseData, + }, + { + name: "NoVersion SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{}, + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectData: baseData, + }, + { + name: "SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectData: baseData, + }, + { + name: "ShortReads SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + r = &shortReader{ + maxReadLen: 3, + ReadCloser: r, + } + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectData: baseData, + }, + { + name: "MultipleReaders", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData)), + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectData: append(slices.Clone(baseData), baseData...), + }, + { + name: "EmptyReader Errors", + inputReader: func(t *testing.T) io.ReadCloser { + return io.NopCloser(bytes.NewReader([]byte{})) + }, + expectErr: require.Error, + }, + { + name: "TruncatedVersion Errors", + inputReader: func(t *testing.T) io.ReadCloser { + return io.NopCloser(bytes.NewReader([]byte{0x80, 0x0})) + }, + expectErr: require.Error, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + r, err := readers.NewVersionedRestoreReader(test.inputReader(t)) + test.expectErr(t, err, "getting restore reader: %v", clues.ToCore(err)) + + if err != nil { + return + } + + defer func() { + err := r.Close() + assert.NoError(t, err, "closing reader: %v", clues.ToCore(err)) + }() + + assert.Equal( + t, + test.expectVersion, + r.Format().Version, + "version") + assert.Equal( + t, + test.expectDelInFlight, + r.Format().DelInFlight, + "deleted in flight") + + buf, err := io.ReadAll(r) + require.NoError(t, err, "reading serialized data: %v", clues.ToCore(err)) + + // Need to use equal because output is order-sensitive. 
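+ // With multiple input readers the restored payload is each reader's bytes
+ // concatenated in the order the readers were supplied, with the 4-byte
+ // header already stripped off.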
+ assert.Equal(t, test.expectData, buf, "serialized data") + }) + } +} diff --git a/src/internal/data/item.go b/src/internal/data/item.go index 2403e63aa..c6cb064e7 100644 --- a/src/internal/data/item.go +++ b/src/internal/data/item.go @@ -1,30 +1,38 @@ package data import ( - "bytes" "context" "io" + "sync" "time" "github.com/alcionai/clues" "github.com/spatialcurrent/go-lazy/pkg/lazy" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" ) var ( + _ Item = &unindexedPrefetchedItem{} + _ ItemModTime = &unindexedPrefetchedItem{} + _ Item = &prefetchedItem{} _ ItemInfo = &prefetchedItem{} _ ItemModTime = &prefetchedItem{} + + _ Item = &unindexedLazyItem{} + _ ItemModTime = &unindexedLazyItem{} + _ Item = &lazyItem{} _ ItemInfo = &lazyItem{} _ ItemModTime = &lazyItem{} ) func NewDeletedItem(itemID string) Item { - return &prefetchedItem{ + return &unindexedPrefetchedItem{ id: itemID, deleted: true, // TODO(ashmrtn): This really doesn't need to be set since deleted items are @@ -34,24 +42,33 @@ func NewDeletedItem(itemID string) Item { } } -func NewPrefetchedItem( +func NewUnindexedPrefetchedItem( reader io.ReadCloser, itemID string, - info details.ItemInfo, -) Item { - return &prefetchedItem{ - id: itemID, - reader: reader, - info: info, - modTime: info.Modified(), + modTime time.Time, +) (*unindexedPrefetchedItem, error) { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: readers.DefaultSerializationVersion}, + reader) + if err != nil { + return nil, clues.Stack(err) } + + return &unindexedPrefetchedItem{ + id: itemID, + reader: r, + modTime: modTime, + }, nil } -// prefetchedItem represents a single item retrieved from the remote service. -type prefetchedItem struct { +// unindexedPrefetchedItem represents a single item retrieved from the remote +// service. +// +// This item doesn't implement ItemInfo so it's safe to use for items like +// metadata that shouldn't appear in backup details. +type unindexedPrefetchedItem struct { id string reader io.ReadCloser - info details.ItemInfo // modTime is the modified time of the item. It should match the modTime in // info if info is present. Here as a separate field so that deleted items // don't error out by trying to source it from info. @@ -62,26 +79,51 @@ type prefetchedItem struct { deleted bool } -func (i prefetchedItem) ID() string { +func (i unindexedPrefetchedItem) ID() string { return i.id } -func (i *prefetchedItem) ToReader() io.ReadCloser { +func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser { return i.reader } -func (i prefetchedItem) Deleted() bool { +func (i unindexedPrefetchedItem) Deleted() bool { return i.deleted } +func (i unindexedPrefetchedItem) ModTime() time.Time { + return i.modTime +} + +func NewPrefetchedItem( + reader io.ReadCloser, + itemID string, + info details.ItemInfo, +) (*prefetchedItem, error) { + inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified()) + if err != nil { + return nil, clues.Stack(err) + } + + return &prefetchedItem{ + unindexedPrefetchedItem: inner, + info: info, + }, nil +} + +// prefetchedItem represents a single item retrieved from the remote service. +// +// This item implements ItemInfo so it should be used for things that need to +// appear in backup details. 
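+//
+// The reader returned by ToReader() already carries the serialization format
+// header injected by NewUnindexedPrefetchedItem.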
+type prefetchedItem struct { + *unindexedPrefetchedItem + info details.ItemInfo +} + func (i prefetchedItem) Info() (details.ItemInfo, error) { return i.info, nil } -func (i prefetchedItem) ModTime() time.Time { - return i.modTime -} - type ItemDataGetter interface { GetData( context.Context, @@ -89,14 +131,14 @@ type ItemDataGetter interface { ) (io.ReadCloser, *details.ItemInfo, bool, error) } -func NewLazyItem( +func NewUnindexedLazyItem( ctx context.Context, itemGetter ItemDataGetter, itemID string, modTime time.Time, errs *fault.Bus, -) Item { - return &lazyItem{ +) *unindexedLazyItem { + return &unindexedLazyItem{ ctx: ctx, id: itemID, itemGetter: itemGetter, @@ -105,11 +147,15 @@ func NewLazyItem( } } -// lazyItem represents a single item retrieved from the remote service. It -// lazily fetches the item's data when the first call to ToReader().Read() is +// unindexedLazyItem represents a single item retrieved from the remote service. +// It lazily fetches the item's data when the first call to ToReader().Read() is // made. -type lazyItem struct { +// +// This item doesn't implement ItemInfo so it's safe to use for items like +// metadata that shouldn't appear in backup details. +type unindexedLazyItem struct { ctx context.Context + mu sync.Mutex id string errs *fault.Bus itemGetter ItemDataGetter @@ -127,17 +173,27 @@ type lazyItem struct { delInFlight bool } -func (i lazyItem) ID() string { +func (i *unindexedLazyItem) ID() string { return i.id } -func (i *lazyItem) ToReader() io.ReadCloser { +func (i *unindexedLazyItem) ToReader() io.ReadCloser { return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { + // Don't allow getting Item info while trying to initialize said info. + // GetData could be a long running call, but in theory nothing should happen + // with the item until a reader is returned anyway. + i.mu.Lock() + defer i.mu.Unlock() + reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.errs) if err != nil { return nil, clues.Stack(err) } + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + // If an item was deleted then return an empty file so we don't fail the // backup and return a sentinel error when asked for ItemInfo so we don't // display the item in the backup. @@ -149,21 +205,59 @@ func (i *lazyItem) ToReader() io.ReadCloser { logger.Ctx(i.ctx).Info("item not found") i.delInFlight = true + format.DelInFlight = true + r, err := readers.NewVersionedBackupReader(format) - return io.NopCloser(bytes.NewReader([]byte{})), nil + return r, clues.Stack(err).OrNil() } i.info = info - return reader, nil + r, err := readers.NewVersionedBackupReader(format, reader) + + return r, clues.Stack(err).OrNil() }) } -func (i lazyItem) Deleted() bool { +func (i *unindexedLazyItem) Deleted() bool { return false } -func (i lazyItem) Info() (details.ItemInfo, error) { +func (i *unindexedLazyItem) ModTime() time.Time { + return i.modTime +} + +func NewLazyItem( + ctx context.Context, + itemGetter ItemDataGetter, + itemID string, + modTime time.Time, + errs *fault.Bus, +) *lazyItem { + return &lazyItem{ + unindexedLazyItem: NewUnindexedLazyItem( + ctx, + itemGetter, + itemID, + modTime, + errs), + } +} + +// lazyItem represents a single item retrieved from the remote service. It +// lazily fetches the item's data when the first call to ToReader().Read() is +// made. +// +// This item implements ItemInfo so it should be used for things that need to +// appear in backup details. 
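+//
+// Info() returns an error if called before the item's data has been read, and
+// returns ErrNotFound if the item turned out to be deleted in flight.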
+type lazyItem struct { + *unindexedLazyItem +} + +func (i *lazyItem) Info() (details.ItemInfo, error) { + i.mu.Lock() + defer i.mu.Unlock() + if i.delInFlight { return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx) } else if i.info == nil { @@ -173,7 +267,3 @@ func (i lazyItem) Info() (details.ItemInfo, error) { return *i.info, nil } - -func (i lazyItem) ModTime() time.Time { - return i.modTime -} diff --git a/src/internal/data/item_test.go b/src/internal/data/item_test.go index 864e70890..f0c7e9009 100644 --- a/src/internal/data/item_test.go +++ b/src/internal/data/item_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" @@ -49,6 +50,38 @@ func TestItemUnitSuite(t *testing.T) { suite.Run(t, &ItemUnitSuite{Suite: tester.NewUnitSuite(t)}) } +func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() { + prefetch, err := data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader([]byte{})), + "foo", + time.Time{}) + require.NoError(suite.T(), err, clues.ToCore(err)) + + var item data.Item = prefetch + + _, ok := item.(data.ItemInfo) + assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()") +} + +func (suite *ItemUnitSuite) TestUnindexedLazyItem() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + lazy := data.NewUnindexedLazyItem( + ctx, + nil, + "foo", + time.Time{}, + fault.New(true)) + + var item data.Item = lazy + + _, ok := item.(data.ItemInfo) + assert.False(t, ok, "unindexedLazyItem implements Info()") +} + func (suite *ItemUnitSuite) TestDeletedItem() { var ( t = suite.T() @@ -115,18 +148,29 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() { suite.Run(test.name, func() { t := suite.T() - item := data.NewPrefetchedItem(test.reader, id, test.info) + item, err := data.NewPrefetchedItem(test.reader, id, test.info) + require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, id, item.ID(), "ID") assert.False(t, item.Deleted(), "deleted") assert.Equal( t, test.info.Modified(), - item.(data.ItemModTime).ModTime(), + item.ModTime(), "mod time") - readData, err := io.ReadAll(item.ToReader()) - test.readErr(t, err, clues.ToCore(err), "read error") + r, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(t, err, "version error: %v", clues.ToCore(err)) + + if err != nil { + return + } + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) + test.readErr(t, err, "read error: %v", clues.ToCore(err)) assert.Equal(t, test.expectData, readData, "read data") }) } @@ -169,6 +213,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { table := []struct { name string mid *mockItemDataGetter + versionErr assert.ErrorAssertionFunc readErr assert.ErrorAssertionFunc infoErr assert.ErrorAssertionFunc expectData []byte @@ -180,6 +225,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { reader: io.NopCloser(bytes.NewReader([]byte{})), info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.NoError, infoErr: assert.NoError, expectData: []byte{}, @@ -190,6 +236,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { reader: io.NopCloser(bytes.NewReader(baseData)), info: &details.ItemInfo{Exchange: 
&details.ExchangeInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.NoError, infoErr: assert.NoError, expectData: baseData, @@ -200,6 +247,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { reader: io.NopCloser(bytes.NewReader(baseData)), info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.NoError, infoErr: assert.NoError, expectData: baseData, @@ -209,6 +257,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { mid: &mockItemDataGetter{ err: assert.AnError, }, + versionErr: assert.Error, readErr: assert.Error, infoErr: assert.Error, expectData: []byte{}, @@ -224,6 +273,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { }, info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.Error, infoErr: assert.NoError, expectData: baseData[:5], @@ -253,15 +303,25 @@ func (suite *ItemUnitSuite) TestLazyItem() { assert.Equal( t, now, - item.(data.ItemModTime).ModTime(), + item.ModTime(), "mod time") // Read data to execute lazy reader. - readData, err := io.ReadAll(item.ToReader()) + r, err := readers.NewVersionedRestoreReader(item.ToReader()) + test.versionErr(t, err, "version error: %v", clues.ToCore(err)) + + if err != nil { + return + } + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) test.readErr(t, err, clues.ToCore(err), "read error") assert.Equal(t, test.expectData, readData, "read data") - _, err = item.(data.ItemInfo).Info() + _, err = item.Info() test.infoErr(t, err, "Info(): %v", clues.ToCore(err)) e := errs.Errors() @@ -301,15 +361,21 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() { assert.Equal( t, now, - item.(data.ItemModTime).ModTime(), + item.ModTime(), "mod time") // Read data to execute lazy reader. 
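+ // Constructing the restore reader consumes the 4-byte format header, so a
+ // missing or malformed header surfaces as an error here instead of on the
+ // first payload read.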
- readData, err := io.ReadAll(item.ToReader())
+ r, err := readers.NewVersionedRestoreReader(item.ToReader())
+ require.NoError(t, err, "version error: %v", clues.ToCore(err))
+
+ assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
+ assert.True(t, r.Format().DelInFlight)
+
+ readData, err := io.ReadAll(r)
 require.NoError(t, err, clues.ToCore(err), "read error")
 assert.Empty(t, readData, "read data")
- _, err = item.(data.ItemInfo).Info()
+ _, err = item.Info()
 assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
 e := errs.Errors()
@@ -341,9 +407,9 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
 assert.Equal(
 t,
 now,
- item.(data.ItemModTime).ModTime(),
+ item.ModTime(),
 "mod time")
- _, err := item.(data.ItemInfo).Info()
+ _, err := item.Info()
 assert.Error(t, err, "Info() error")
 }
diff --git a/src/internal/data/mock/collection.go b/src/internal/data/mock/collection.go
index 6fd461db6..39a974e36 100644
--- a/src/internal/data/mock/collection.go
+++ b/src/internal/data/mock/collection.go
@@ -3,8 +3,13 @@ package mock
 import (
 "context"
 "io"
+ "testing"
 "time"
+ "github.com/alcionai/clues"
+ "github.com/stretchr/testify/require"
+
+ "github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/pkg/backup/details"
 "github.com/alcionai/corso/src/pkg/fault"
@@ -163,3 +168,106 @@ func (rc RestoreCollection) FetchItemByName(
 return res, nil
 }
+
+var (
+ _ data.BackupCollection = &versionedBackupCollection{}
+ _ data.RestoreCollection = &unversionedRestoreCollection{}
+ _ data.Item = &itemWrapper{}
+)
+
+type itemWrapper struct {
+ data.Item
+ reader io.ReadCloser
+}
+
+func (i *itemWrapper) ToReader() io.ReadCloser {
+ return i.reader
+}
+
+func NewUnversionedRestoreCollection(
+ t *testing.T,
+ col data.RestoreCollection,
+) *unversionedRestoreCollection {
+ return &unversionedRestoreCollection{
+ RestoreCollection: col,
+ t: t,
+ }
+}
+
+// unversionedRestoreCollection strips out version format headers on all items.
+//
+// Wrap data.RestoreCollections in this type if you don't need access to the
+// version format header during tests and you know the item readers can't return
+// an error.
+type unversionedRestoreCollection struct {
+ data.RestoreCollection
+ t *testing.T
+}
+
+func (c *unversionedRestoreCollection) Items(
+ ctx context.Context,
+ errs *fault.Bus,
+) <-chan data.Item {
+ res := make(chan data.Item)
+ go func() {
+ defer close(res)
+
+ for item := range c.RestoreCollection.Items(ctx, errs) {
+ r, err := readers.NewVersionedRestoreReader(item.ToReader())
+ require.NoError(c.t, err, clues.ToCore(err))
+
+ res <- &itemWrapper{
+ Item: item,
+ reader: r,
+ }
+ }
+ }()
+
+ return res
+}
+
+func NewVersionedBackupCollection(
+ t *testing.T,
+ col data.BackupCollection,
+) *versionedBackupCollection {
+ return &versionedBackupCollection{
+ BackupCollection: col,
+ t: t,
+ }
+}
+
+// versionedBackupCollection injects basic version information on all items.
+//
+// Wrap data.BackupCollections in this type if you don't need to explicitly set
+// the version format header during tests, aren't trying to check reader error
+// cases, and aren't populating backup details. 
+type versionedBackupCollection struct { + data.BackupCollection + t *testing.T +} + +func (c *versionedBackupCollection) Items( + ctx context.Context, + errs *fault.Bus, +) <-chan data.Item { + res := make(chan data.Item) + go func() { + defer close(res) + + for item := range c.BackupCollection.Items(ctx, errs) { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + }, + item.ToReader()) + require.NoError(c.t, err, clues.ToCore(err)) + + res <- &itemWrapper{ + Item: item, + reader: r, + } + } + }() + + return res +} diff --git a/src/internal/kopia/conn.go b/src/internal/kopia/conn.go index f1c0f82df..7a4948787 100644 --- a/src/internal/kopia/conn.go +++ b/src/internal/kopia/conn.go @@ -205,7 +205,7 @@ func (w *conn) commonConnect( bst, password, kopiaOpts); err != nil { - return clues.Wrap(err, "connecting to repo").WithClues(ctx) + return clues.Wrap(err, "connecting to kopia repo").WithClues(ctx) } if err := w.open(ctx, cfgFile, password); err != nil { @@ -580,6 +580,10 @@ func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) { } func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error { + if len(password) <= 0 { + return clues.New("empty password provided") + } + kopiaRef := NewConn(w.storage) if err := kopiaRef.Connect(ctx, opts); err != nil { return clues.Wrap(err, "connecting kopia client") @@ -587,8 +591,10 @@ func (w *conn) UpdatePassword(ctx context.Context, password string, opts reposit defer kopiaRef.Close(ctx) - repository := kopiaRef.Repository.(repo.DirectRepository) - err := repository.FormatManager().ChangePassword(ctx, password) + kopiaRepo := kopiaRef.Repository.(repo.DirectRepository) + if err := kopiaRepo.FormatManager().ChangePassword(ctx, password); err != nil { + return clues.Wrap(err, "unable to update password") + } - return errors.Wrap(err, "unable to update password") + return nil } diff --git a/src/internal/kopia/conn_test.go b/src/internal/kopia/conn_test.go index bbd824c3d..e5c2dbdec 100644 --- a/src/internal/kopia/conn_test.go +++ b/src/internal/kopia/conn_test.go @@ -22,6 +22,20 @@ import ( storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" ) +func openLocalKopiaRepo( + t tester.TestT, + ctx context.Context, //revive:disable-line:context-as-argument +) (*conn, error) { + st := storeTD.NewFilesystemStorage(t) + + k := NewConn(st) + if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil { + return nil, err + } + + return k, nil +} + func openKopiaRepo( t tester.TestT, ctx context.Context, //revive:disable-line:context-as-argument @@ -81,7 +95,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() { ctx, flush := tester.NewContext(t) defer flush() - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) k := NewConn(st) err := k.Initialize(ctx, repository.Options{}, repository.Retention{}) @@ -101,7 +115,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() { ctx, flush := tester.NewContext(t) defer flush() - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) st.Provider = storage.ProviderUnknown k := NewConn(st) @@ -115,7 +129,7 @@ func (suite *WrapperIntegrationSuite) TestConnectWithoutInitErrors() { ctx, flush := tester.NewContext(t) defer flush() - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) k := NewConn(st) err := k.Connect(ctx, repository.Options{}) @@ -408,7 +422,7 @@ func (suite 
*WrapperIntegrationSuite) TestSetUserAndHost() { Host: "bar", } - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) k := NewConn(st) err := k.Initialize(ctx, opts, repository.Retention{}) diff --git a/src/internal/kopia/data_collection.go b/src/internal/kopia/data_collection.go index 319914f1a..c5899afdf 100644 --- a/src/internal/kopia/data_collection.go +++ b/src/internal/kopia/data_collection.go @@ -7,6 +7,7 @@ import ( "github.com/alcionai/clues" "github.com/kopia/kopia/fs" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" @@ -16,6 +17,7 @@ import ( var ( _ data.RestoreCollection = &kopiaDataCollection{} _ data.Item = &kopiaDataStream{} + _ data.ItemSize = &kopiaDataStream{} ) type kopiaDataCollection struct { @@ -23,7 +25,7 @@ type kopiaDataCollection struct { dir fs.Directory items []string counter ByteCounter - expectedVersion uint32 + expectedVersion readers.SerializationVersion } func (kdc *kopiaDataCollection) Items( @@ -102,7 +104,7 @@ func (kdc kopiaDataCollection) FetchItemByName( return nil, clues.New("object is not a file").WithClues(ctx) } - size := f.Size() - int64(versionSize) + size := f.Size() - int64(readers.VersionFormatSize) if size < 0 { logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size) @@ -118,13 +120,32 @@ func (kdc kopiaDataCollection) FetchItemByName( return nil, clues.Wrap(err, "opening file").WithClues(ctx) } + // TODO(ashmrtn): Remove this when individual services implement checks for + // version and deleted items. + rr, err := readers.NewVersionedRestoreReader(r) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + if rr.Format().Version != kdc.expectedVersion { + return nil, clues.New("unexpected data format"). + WithClues(ctx). + With( + "read_version", rr.Format().Version, + "expected_version", kdc.expectedVersion) + } + + // This is a conservative check, but we shouldn't be seeing items that were + // deleted in flight during restores because there's no way to select them. + if rr.Format().DelInFlight { + return nil, clues.New("selected item marked as deleted in flight"). + WithClues(ctx) + } + return &kopiaDataStream{ - id: name, - reader: &restoreStreamReader{ - ReadCloser: r, - expectedVersion: kdc.expectedVersion, - }, - size: size, + id: name, + reader: rr, + size: size, }, nil } diff --git a/src/internal/kopia/data_collection_test.go b/src/internal/kopia/data_collection_test.go index d587730ca..4b1b4a4b2 100644 --- a/src/internal/kopia/data_collection_test.go +++ b/src/internal/kopia/data_collection_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" dataMock "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/tester" @@ -121,25 +122,35 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() { ) // Needs to be a function so the readers get refreshed each time. 
- getLayout := func() fs.Directory { + getLayout := func(t *testing.T) fs.Directory { + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(files[0].data))) + require.NoError(t, err, clues.ToCore(err)) + + r2, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(files[1].data))) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(files[0].uuid), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(files[0].data))), - size: int64(len(files[0].data) + versionSize), + r: r1, + size: int64(len(files[0].data) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(files[1].uuid), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(files[1].data))), - size: int64(len(files[1].data) + versionSize), + r: r2, + size: int64(len(files[1].data) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( @@ -224,10 +235,10 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() { } c := kopiaDataCollection{ - dir: getLayout(), + dir: getLayout(t), path: nil, items: items, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } var ( @@ -291,23 +302,34 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { // Needs to be a function so we can switch the serialization version as // needed. - getLayout := func(serVersion uint32) fs.Directory { + getLayout := func( + t *testing.T, + serVersion readers.SerializationVersion, + ) fs.Directory { + format := readers.SerializationFormat{Version: serVersion} + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader([]byte(noErrFileData)))) + require.NoError(t, err, clues.ToCore(err)) + + r2, err := readers.NewVersionedBackupReader( + format, + errReader.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(noErrFileName), nil), - r: newBackupStreamReader( - serVersion, - io.NopCloser(bytes.NewReader([]byte(noErrFileData)))), + r: r1, }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(errFileName), nil), - r: newBackupStreamReader( - serVersion, - errReader.ToReader()), + r: r2, }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( @@ -330,7 +352,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { table := []struct { name string inputName string - inputSerializationVersion uint32 + inputSerializationVersion readers.SerializationVersion expectedData []byte lookupErr assert.ErrorAssertionFunc readErr assert.ErrorAssertionFunc @@ -339,7 +361,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { { name: "FileFound_NoError", inputName: noErrFileName, - inputSerializationVersion: serializationVersion, + inputSerializationVersion: readers.DefaultSerializationVersion, expectedData: []byte(noErrFileData), lookupErr: assert.NoError, readErr: assert.NoError, @@ -347,21 +369,20 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { { name: "FileFound_ReadError", inputName: errFileName, - 
inputSerializationVersion: serializationVersion, + inputSerializationVersion: readers.DefaultSerializationVersion, lookupErr: assert.NoError, readErr: assert.Error, }, { name: "FileFound_VersionError", inputName: noErrFileName, - inputSerializationVersion: serializationVersion + 1, - lookupErr: assert.NoError, - readErr: assert.Error, + inputSerializationVersion: readers.DefaultSerializationVersion + 1, + lookupErr: assert.Error, }, { name: "FileNotFound", inputName: "foo", - inputSerializationVersion: serializationVersion + 1, + inputSerializationVersion: readers.DefaultSerializationVersion + 1, lookupErr: assert.Error, notFoundErr: true, }, @@ -373,14 +394,14 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { ctx, flush := tester.NewContext(t) defer flush() - root := getLayout(test.inputSerializationVersion) + root := getLayout(t, test.inputSerializationVersion) c := &i64counter{} col := &kopiaDataCollection{ path: pth, dir: root, counter: c, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } s, err := col.FetchItemByName(ctx, test.inputName) diff --git a/src/internal/kopia/filesystem.go b/src/internal/kopia/filesystem.go index 3081ac286..e67afa85e 100644 --- a/src/internal/kopia/filesystem.go +++ b/src/internal/kopia/filesystem.go @@ -16,12 +16,11 @@ func filesystemStorage( repoOpts repository.Options, s storage.Storage, ) (blob.Storage, error) { - cfg, err := s.StorageConfig() + fsCfg, err := s.ToFilesystemConfig() if err != nil { return nil, clues.Stack(err).WithClues(ctx) } - fsCfg := cfg.(*storage.FilesystemConfig) opts := filesystem.Options{ Path: fsCfg.Path, } diff --git a/src/internal/kopia/merge_collection_test.go b/src/internal/kopia/merge_collection_test.go index f89c2dd95..fefbfbb15 100644 --- a/src/internal/kopia/merge_collection_test.go +++ b/src/internal/kopia/merge_collection_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/service/exchange/mock" "github.com/alcionai/corso/src/internal/tester" @@ -150,20 +151,27 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() { require.NoError(suite.T(), err, clues.ToCore(err)) // Needs to be a function so the readers get refreshed each time. 
- layouts := []func() fs.Directory{ + layouts := []func(t *testing.T) fs.Directory{ // Has the following; // - file1: data[0] // - errOpen: (error opening file) - func() fs.Directory { + func(t *testing.T) fs.Directory { + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData1))) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileName1), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData1))), - size: int64(len(fileData1) + versionSize), + r: r1, + size: int64(len(fileData1) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( @@ -178,34 +186,47 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() { // - file1: data[1] // - file2: data[0] // - errOpen: data[2] - func() fs.Directory { + func(t *testing.T) fs.Directory { + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData2))) + require.NoError(t, err, clues.ToCore(err)) + + r2, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData1))) + require.NoError(t, err, clues.ToCore(err)) + + r3, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData3))) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileName1), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData2))), - size: int64(len(fileData2) + versionSize), + r: r1, + size: int64(len(fileData2) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileName2), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData1))), - size: int64(len(fileData1) + versionSize), + r: r2, + size: int64(len(fileData1) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileOpenErrName), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData3))), - size: int64(len(fileData3) + versionSize), + r: r3, + size: int64(len(fileData3) + readers.VersionFormatSize), }, }) }, @@ -257,9 +278,9 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() { for i, layout := range layouts { col := &kopiaDataCollection{ path: pth, - dir: layout(), + dir: layout(t), counter: c, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } err := dc.addCollection(colPaths[i], col) diff --git a/src/internal/kopia/model_store_test.go b/src/internal/kopia/model_store_test.go index 6226a14ad..db25eee57 100644 --- a/src/internal/kopia/model_store_test.go +++ b/src/internal/kopia/model_store_test.go @@ -29,7 +29,7 @@ type fooModel struct { //revive:disable-next-line:context-as-argument func getModelStore(t *testing.T, ctx context.Context) *ModelStore { - c, err := openKopiaRepo(t, ctx) + c, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) return &ModelStore{c: c, modelVersion: globalModelVersion} @@ -856,7 
+856,7 @@ func openConnAndModelStore( t *testing.T, ctx context.Context, //revive:disable-line:context-as-argument ) (*conn, *ModelStore) { - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) c := NewConn(st) err := c.Initialize(ctx, repository.Options{}, repository.Retention{}) diff --git a/src/internal/kopia/s3.go b/src/internal/kopia/s3.go index f4a379ada..b7dbbd5cf 100644 --- a/src/internal/kopia/s3.go +++ b/src/internal/kopia/s3.go @@ -20,13 +20,11 @@ func s3BlobStorage( repoOpts repository.Options, s storage.Storage, ) (blob.Storage, error) { - sc, err := s.StorageConfig() + cfg, err := s.ToS3Config() if err != nil { return nil, clues.Stack(err).WithClues(ctx) } - cfg := sc.(*storage.S3Config) - endpoint := defaultS3Endpoint if len(cfg.Endpoint) > 0 { endpoint = cfg.Endpoint diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index bc7a1f034..6030ec838 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -1,19 +1,14 @@ package kopia import ( - "bytes" "context" "encoding/base64" - "encoding/binary" "errors" - "io" - "os" "runtime/trace" "strings" "sync" "sync/atomic" "time" - "unsafe" "github.com/alcionai/clues" "github.com/kopia/kopia/fs" @@ -37,101 +32,6 @@ import ( const maxInflateTraversalDepth = 500 -var versionSize = int(unsafe.Sizeof(serializationVersion)) - -func newBackupStreamReader(version uint32, reader io.ReadCloser) *backupStreamReader { - buf := make([]byte, versionSize) - binary.BigEndian.PutUint32(buf, version) - bufReader := io.NopCloser(bytes.NewReader(buf)) - - return &backupStreamReader{ - readers: []io.ReadCloser{bufReader, reader}, - combined: io.NopCloser(io.MultiReader(bufReader, reader)), - } -} - -// backupStreamReader is a wrapper around the io.Reader that other Corso -// components return when backing up information. It injects a version number at -// the start of the data stream. Future versions of Corso may not need this if -// they use more complex serialization logic as serialization/version injection -// will be handled by other components. -type backupStreamReader struct { - readers []io.ReadCloser - combined io.ReadCloser -} - -func (rw *backupStreamReader) Read(p []byte) (n int, err error) { - if rw.combined == nil { - return 0, os.ErrClosed - } - - return rw.combined.Read(p) -} - -func (rw *backupStreamReader) Close() error { - if rw.combined == nil { - return nil - } - - rw.combined = nil - - var errs *clues.Err - - for _, r := range rw.readers { - err := r.Close() - if err != nil { - errs = clues.Stack(clues.Wrap(err, "closing reader"), errs) - } - } - - return errs.OrNil() -} - -// restoreStreamReader is a wrapper around the io.Reader that kopia returns when -// reading data from an item. It examines and strips off the version number of -// the restored data. Future versions of Corso may not need this if they use -// more complex serialization logic as version checking/deserialization will be -// handled by other components. A reader that returns a version error is no -// longer valid and should not be used once the version error is returned. 
-type restoreStreamReader struct { - io.ReadCloser - expectedVersion uint32 - readVersion bool -} - -func (rw *restoreStreamReader) checkVersion() error { - versionBuf := make([]byte, versionSize) - - for newlyRead := 0; newlyRead < versionSize; { - n, err := rw.ReadCloser.Read(versionBuf[newlyRead:]) - if err != nil { - return clues.Wrap(err, "reading data format version") - } - - newlyRead += n - } - - version := binary.BigEndian.Uint32(versionBuf) - - if version != rw.expectedVersion { - return clues.New("unexpected data format").With("read_version", version) - } - - return nil -} - -func (rw *restoreStreamReader) Read(p []byte) (n int, err error) { - if !rw.readVersion { - rw.readVersion = true - - if err := rw.checkVersion(); err != nil { - return 0, err - } - } - - return rw.ReadCloser.Read(p) -} - type itemDetails struct { infoer data.ItemInfo repoPath path.Path @@ -436,7 +336,7 @@ func collectionEntries( entry := virtualfs.StreamingFileWithModTimeFromReader( encodedName, modTime, - newBackupStreamReader(serializationVersion, e.ToReader())) + e.ToReader()) err = ctr(ctx, entry) if err != nil { diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index fd74cd9fa..168d32617 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -14,7 +14,6 @@ import ( "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot/snapshotfs" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -124,12 +123,6 @@ func expectFileData( return } - // Need to wrap with a restore stream reader to remove the version. - r = &restoreStreamReader{ - ReadCloser: io.NopCloser(r), - expectedVersion: serializationVersion, - } - got, err := io.ReadAll(r) if !assert.NoError(t, err, "reading data in file", name, clues.ToCore(err)) { return @@ -226,135 +219,6 @@ func getDirEntriesForEntry( // --------------- // unit tests // --------------- -type limitedRangeReader struct { - readLen int - io.ReadCloser -} - -func (lrr *limitedRangeReader) Read(p []byte) (int, error) { - if len(p) == 0 { - // Not well specified behavior, defer to underlying reader. 
- return lrr.ReadCloser.Read(p) - } - - toRead := lrr.readLen - if len(p) < toRead { - toRead = len(p) - } - - return lrr.ReadCloser.Read(p[:toRead]) -} - -type VersionReadersUnitSuite struct { - tester.Suite -} - -func TestVersionReadersUnitSuite(t *testing.T) { - suite.Run(t, &VersionReadersUnitSuite{Suite: tester.NewUnitSuite(t)}) -} - -func (suite *VersionReadersUnitSuite) TestWriteAndRead() { - inputData := []byte("This is some data for the reader to test with") - table := []struct { - name string - readVersion uint32 - writeVersion uint32 - check assert.ErrorAssertionFunc - }{ - { - name: "SameVersionSucceeds", - readVersion: 42, - writeVersion: 42, - check: assert.NoError, - }, - { - name: "DifferentVersionsFail", - readVersion: 7, - writeVersion: 42, - check: assert.Error, - }, - } - - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - baseReader := bytes.NewReader(inputData) - - reversible := &restoreStreamReader{ - expectedVersion: test.readVersion, - ReadCloser: newBackupStreamReader( - test.writeVersion, - io.NopCloser(baseReader)), - } - - defer reversible.Close() - - allData, err := io.ReadAll(reversible) - test.check(t, err, clues.ToCore(err)) - - if err != nil { - return - } - - assert.Equal(t, inputData, allData) - }) - } -} - -func readAllInParts( - t *testing.T, - partLen int, - reader io.ReadCloser, -) ([]byte, int) { - res := []byte{} - read := 0 - tmp := make([]byte, partLen) - - for { - n, err := reader.Read(tmp) - if errors.Is(err, io.EOF) { - break - } - - require.NoError(t, err, clues.ToCore(err)) - - read += n - res = append(res, tmp[:n]...) - } - - return res, read -} - -func (suite *VersionReadersUnitSuite) TestWriteHandlesShortReads() { - t := suite.T() - inputData := []byte("This is some data for the reader to test with") - version := uint32(42) - baseReader := bytes.NewReader(inputData) - versioner := newBackupStreamReader(version, io.NopCloser(baseReader)) - expectedToWrite := len(inputData) + int(versionSize) - - // "Write" all the data. - versionedData, writtenLen := readAllInParts(t, 1, versioner) - assert.Equal(t, expectedToWrite, writtenLen) - - // Read all of the data back. - baseReader = bytes.NewReader(versionedData) - reader := &restoreStreamReader{ - expectedVersion: version, - // Be adversarial and only allow reads of length 1 from the byte reader. - ReadCloser: &limitedRangeReader{ - readLen: 1, - ReadCloser: io.NopCloser(baseReader), - }, - } - readData, readLen := readAllInParts(t, 1, reader) - // This reports the bytes read and returned to the user, excluding the version - // that is stripped off at the start. - assert.Equal(t, len(inputData), readLen) - assert.Equal(t, inputData, readData) -} - type CorsoProgressUnitSuite struct { tester.Suite targetFilePath path.Path @@ -2420,9 +2284,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt encodeElements(inboxFileName1)[0], time.Time{}, // Wrap with a backup reader so it gets the version injected. 
- newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(inboxFileData1v2)))), + io.NopCloser(bytes.NewReader(inboxFileData1v2))), }), }), virtualfs.NewStaticDirectory( @@ -2582,9 +2444,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt virtualfs.StreamingFileWithModTimeFromReader( encodeElements(inboxFileName1)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(inboxFileData1)))), + io.NopCloser(bytes.NewReader(inboxFileData1))), }), }), virtualfs.NewStaticDirectory( @@ -2596,9 +2456,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt virtualfs.StreamingFileWithModTimeFromReader( encodeElements(contactsFileName1)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(contactsFileData1)))), + io.NopCloser(bytes.NewReader(contactsFileData1))), }), }), }) @@ -2817,15 +2675,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName5)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData5)))), + io.NopCloser(bytes.NewReader(fileData5))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName6)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData6)))), + io.NopCloser(bytes.NewReader(fileData6))), }) counters[folderID3] = count @@ -2835,15 +2689,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName3)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData3)))), + io.NopCloser(bytes.NewReader(fileData3))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName4)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData4)))), + io.NopCloser(bytes.NewReader(fileData4))), folder, }) counters[folderID2] = count @@ -2859,15 +2709,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName1)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData1)))), + io.NopCloser(bytes.NewReader(fileData1))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName2)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData2)))), + io.NopCloser(bytes.NewReader(fileData2))), folder, folder4, }) @@ -2879,15 +2725,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName7)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData7)))), + io.NopCloser(bytes.NewReader(fileData7))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName8)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData8)))), + io.NopCloser(bytes.NewReader(fileData8))), }) counters[folderID5] = count diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index 24e0708b5..10523de6c 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -18,6 +18,7 @@ import ( "golang.org/x/exp/maps" 
"github.com/alcionai/corso/src/internal/common/prefixmatcher" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/observe" @@ -36,8 +37,6 @@ const ( // possibly corresponding to who is making the backup. corsoHost = "corso-host" corsoUser = "corso" - - serializationVersion uint32 = 1 ) // common manifest tags @@ -447,7 +446,7 @@ func loadDirsAndItems( dir: dir, items: dirItems.items, counter: bcounter, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil { diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 452bc4ffa..7b4508465 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -184,7 +184,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_FirstRun_NoChanges() { ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) w := &Wrapper{k} @@ -204,7 +204,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_NoForce_Fails ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) w := &Wrapper{k} @@ -241,7 +241,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_Force_Succeed ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) w := &Wrapper{k} @@ -754,7 +754,7 @@ func (suite *KopiaIntegrationSuite) SetupTest() { t := suite.T() suite.ctx, suite.flush = tester.NewContext(t) - c, err := openKopiaRepo(t, suite.ctx) + c, err := openLocalKopiaRepo(t, suite.ctx) require.NoError(t, err, clues.ToCore(err)) suite.w = &Wrapper{c} @@ -1245,7 +1245,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) err = k.Compression(ctx, "s2-default") @@ -1268,7 +1268,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { ctx, []identity.Reasoner{r}, nil, - []data.BackupCollection{dc1, dc2}, + []data.BackupCollection{ + dataMock.NewVersionedBackupCollection(t, dc1), + dataMock.NewVersionedBackupCollection(t, dc2), + }, nil, nil, true, @@ -1556,7 +1559,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { //nolint:forbidigo suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls) - c, err := openKopiaRepo(t, suite.ctx) + c, err := openLocalKopiaRepo(t, suite.ctx) require.NoError(t, err, clues.ToCore(err)) suite.w = &Wrapper{c} @@ -1577,12 +1580,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { }) } - collections = append(collections, collection) + collections = append( + collections, + dataMock.NewVersionedBackupCollection(t, collection)) } r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) - stats, deets, _, err := suite.w.ConsumeBackupCollections( + // Other tests check basic things about deets so not doing that again here. 
+ stats, _, _, err := suite.w.ConsumeBackupCollections( suite.ctx, []identity.Reasoner{r}, nil, @@ -1597,8 +1603,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { require.Equal(t, stats.TotalDirectoryCount, expectedDirs) require.Equal(t, stats.IgnoredErrorCount, 0) require.False(t, stats.Incomplete) - // 6 file and 2 folder entries. - assert.Len(t, deets.Details().Entries, expectedFiles+2) suite.snapshotID = manifest.ID(stats.SnapshotID) } @@ -1629,7 +1633,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludePrefix bool expectedCachedItems int expectedUncachedItems int - cols func() []data.BackupCollection + cols func(t *testing.T) []data.BackupCollection backupIDCheck require.ValueAssertionFunc restoreCheck assert.ErrorAssertionFunc }{ @@ -1638,7 +1642,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludeItem: true, expectedCachedItems: len(suite.filesByPath) - 1, expectedUncachedItems: 0, - cols: func() []data.BackupCollection { + cols: func(t *testing.T) []data.BackupCollection { return nil }, backupIDCheck: require.NotEmpty, @@ -1650,7 +1654,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludePrefix: true, expectedCachedItems: len(suite.filesByPath) - 1, expectedUncachedItems: 0, - cols: func() []data.BackupCollection { + cols: func(t *testing.T) []data.BackupCollection { return nil }, backupIDCheck: require.NotEmpty, @@ -1661,7 +1665,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { // No snapshot should be made since there were no changes. expectedCachedItems: 0, expectedUncachedItems: 0, - cols: func() []data.BackupCollection { + cols: func(t *testing.T) []data.BackupCollection { return nil }, // Backup doesn't run. 
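Reviewer note: the dataMock.NewVersionedBackupCollection wrapping used in these tests exists because every backed-up item now carries a 4-byte serialization header that the restore path validates. Below is a minimal, self-contained sketch of that framing, assuming only the header layout exercised by the unit tests (a big-endian uint32 whose high bit flags deleted-in-flight and whose low bits carry the version); prependHeader and parseHeader are illustrative names, not the package's API.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const delInFlightMask = uint32(0x80000000)

// prependHeader frames a payload with the 4-byte version header: the flag bit
// OR'd into the version, encoded big-endian, followed by the payload bytes.
func prependHeader(version uint32, delInFlight bool, payload io.Reader) io.Reader {
	v := version
	if delInFlight {
		v |= delInFlightMask
	}

	hdr := make([]byte, 4)
	binary.BigEndian.PutUint32(hdr, v)

	return io.MultiReader(bytes.NewReader(hdr), payload)
}

// parseHeader consumes the header from r, returning the version and flag and
// leaving r positioned at the first payload byte.
func parseHeader(r io.Reader) (uint32, bool, error) {
	hdr := make([]byte, 4)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return 0, false, fmt.Errorf("reading format header: %w", err)
	}

	v := binary.BigEndian.Uint32(hdr)

	return v &^ delInFlightMask, v&delInFlightMask != 0, nil
}

func main() {
	r := prependHeader(42, false, bytes.NewReader([]byte("hello world")))

	version, delInFlight, err := parseHeader(r)
	if err != nil {
		panic(err)
	}

	payload, _ := io.ReadAll(r)
	fmt.Println(version, delInFlight, string(payload)) // 42 false hello world
}

The write side mirrors what the removed newBackupStreamReader did with io.MultiReader; the read side mirrors the header check that moved from restoreStreamReader into NewVersionedRestoreReader.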
@@ -1671,7 +1675,7 @@
 name: "NoExcludeItemWithChanges",
 expectedCachedItems: len(suite.filesByPath),
 expectedUncachedItems: 1,
- cols: func() []data.BackupCollection {
+ cols: func(t *testing.T) []data.BackupCollection {
 c := exchMock.NewCollection(
 suite.testPath1,
 suite.testPath1,
@@ -1679,7 +1683,9 @@
 c.ColState = data.NotMovedState
 c.PrevPath = suite.testPath1
 
- return []data.BackupCollection{c}
+ return []data.BackupCollection{
+ dataMock.NewVersionedBackupCollection(t, c),
+ }
 },
 backupIDCheck: require.NotEmpty,
 restoreCheck: assert.NoError,
@@ -1717,7 +1723,7 @@
 Manifest: man,
 Reasons: []identity.Reasoner{r},
 }),
- test.cols(),
+ test.cols(t),
 excluded,
 nil,
 true,
diff --git a/src/internal/m365/backup.go b/src/internal/m365/backup.go
index f916c7257..7b54c36cb 100644
--- a/src/internal/m365/backup.go
+++ b/src/internal/m365/backup.go
@@ -100,7 +100,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 }
 case path.GroupsService:
- colls, ssmb, canUsePreviousBackup, err = groups.ProduceBackupCollections(
+ colls, ssmb, err = groups.ProduceBackupCollections(
 ctx,
 bpc,
 ctrl.AC,
@@ -111,6 +111,10 @@
 return nil, nil, false, err
 }
+ // canUsePreviousBackup can always be returned as true for groups because we
+ // return a tombstone collection if the metadata read fails.
+ canUsePreviousBackup = true
+
 default:
 return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
 }
diff --git a/src/internal/m365/backup_test.go b/src/internal/m365/backup_test.go
index acaa6036b..f7e51f89d 100644
--- a/src/internal/m365/backup_test.go
+++ b/src/internal/m365/backup_test.go
@@ -11,6 +11,9 @@ import (
 "github.com/stretchr/testify/suite"
 inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
+ "github.com/alcionai/corso/src/internal/common/ptr"
+ "github.com/alcionai/corso/src/internal/data"
+ "github.com/alcionai/corso/src/internal/data/mock"
 "github.com/alcionai/corso/src/internal/m365/service/exchange"
 odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 "github.com/alcionai/corso/src/internal/m365/service/sharepoint"
@@ -458,9 +461,8 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
 for item := range collection.Items(ctx, fault.New(true)) {
 t.Log("File: " + item.ID())
- bs, err := io.ReadAll(item.ToReader())
+ _, err := io.ReadAll(item.ToReader())
 require.NoError(t, err, clues.ToCore(err))
- t.Log(string(bs))
 }
 }
 }
@@ -575,3 +577,123 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
 assert.NotZero(t, status.Successes)
 t.Log(status.String())
 }
+
+func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_InvalidMetadata() {
+ t := suite.T()
+
+ ctx, flush := tester.NewContext(t)
+ defer flush()
+
+ var (
+ groupID = tconfig.M365GroupID(t)
+ ctrl = newController(ctx, t, path.GroupsService)
+ groupIDs = []string{groupID}
+ )
+
+ id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
+ require.NoError(t, err, clues.ToCore(err))
+
+ sel := selectors.NewGroupsBackup(groupIDs)
+ sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
+
+ sel.SetDiscreteOwnerIDName(id, name)
+
+ site, err := suite.connector.AC.Groups().GetRootSite(ctx, groupID)
+ 
require.NoError(t, err, clues.ToCore(err)) + + pth, err := path.Build( + suite.tenantID, + groupID, + path.GroupsService, + path.LibrariesCategory, + true, + odConsts.SitesPathDir, + ptr.Val(site.GetId())) + require.NoError(t, err, clues.ToCore(err)) + + mmc := []data.RestoreCollection{ + mock.Collection{ + Path: pth, + ItemData: []data.Item{ + &mock.Item{ + ItemID: "previouspath", + Reader: io.NopCloser(bytes.NewReader([]byte("invalid"))), + }, + }, + }, + } + + bpc := inject.BackupProducerConfig{ + LastBackupVersion: version.NoBackup, + Options: control.DefaultOptions(), + ProtectedResource: inMock.NewProvider(id, name), + Selector: sel.Selector, + MetadataCollections: mmc, + } + + collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections( + ctx, + bpc, + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + assert.True(t, canUsePreviousBackup, "can use previous backup") + // No excludes yet as this isn't an incremental backup. + assert.True(t, excludes.Empty()) + + // we don't know an exact count of drives this will produce, + // but it should be more than one. + assert.Greater(t, len(collections), 1) + + p, err := path.BuildMetadata( + suite.tenantID, + groupID, + path.GroupsService, + path.LibrariesCategory, + false) + require.NoError(t, err, clues.ToCore(err)) + + p, err = p.Append(false, odConsts.SitesPathDir) + require.NoError(t, err, clues.ToCore(err)) + + foundSitesMetadata := false + foundRootTombstone := false + + sp, err := path.BuildPrefix( + suite.tenantID, + groupID, + path.GroupsService, + path.LibrariesCategory) + require.NoError(t, err, clues.ToCore(err)) + + sp, err = sp.Append(false, odConsts.SitesPathDir, ptr.Val(site.GetId())) + require.NoError(t, err, clues.ToCore(err)) + + for _, coll := range collections { + if coll.State() == data.DeletedState { + if coll.PreviousPath() != nil && coll.PreviousPath().String() == sp.String() { + foundRootTombstone = true + } + + continue + } + + sitesMetadataCollection := coll.FullPath().String() == p.String() + + for object := range coll.Items(ctx, fault.New(true)) { + if object.ID() == "previouspath" && sitesMetadataCollection { + foundSitesMetadata = true + } + + buf := &bytes.Buffer{} + _, err := buf.ReadFrom(object.ToReader()) + assert.NoError(t, err, "reading item", clues.ToCore(err)) + } + } + + assert.True(t, foundSitesMetadata, "missing sites metadata") + assert.True(t, foundRootTombstone, "missing root tombstone") + + status := ctrl.Wait() + assert.NotZero(t, status.Successes) + t.Log(status.String()) +} diff --git a/src/internal/m365/collection/drive/collection.go b/src/internal/m365/collection/drive/collection.go index 0cdf79c0e..423c43930 100644 --- a/src/internal/m365/collection/drive/collection.go +++ b/src/internal/m365/collection/drive/collection.go @@ -33,11 +33,7 @@ const ( MaxOneNoteFileSize = 2 * 1024 * 1024 * 1024 ) -var ( - _ data.BackupCollection = &Collection{} - _ data.Item = &metadata.Item{} - _ data.ItemModTime = &metadata.Item{} -) +var _ data.BackupCollection = &Collection{} // Collection represents a set of OneDrive objects retrieved from M365 type Collection struct { @@ -588,14 +584,25 @@ func (oc *Collection) streamDriveItem( return progReader, nil }) - oc.data <- &metadata.Item{ - ItemID: metaFileName + metaSuffix, - Data: metaReader, + storeItem, err := data.NewUnindexedPrefetchedItem( + metaReader, + metaFileName+metaSuffix, // Metadata file should always use the latest time as // permissions change does not update mod time. 
- Mod: time.Now(), + time.Now()) + if err != nil { + errs.AddRecoverable(ctx, clues.Stack(err). + WithClues(ctx). + Label(fault.LabelForceNoBackupCreation)) + + return } + // We wrap the reader with a lazy reader so that the progress bar is only + // initialized if the file is read. Since we're not actually lazily reading + // data just use the eager item implementation. + oc.data <- storeItem + // Item read successfully, add to collection if isFile { atomic.AddInt64(&stats.itemsRead, 1) diff --git a/src/internal/m365/collection/drive/collection_test.go b/src/internal/m365/collection/drive/collection_test.go index b99a2484e..2e2f85160 100644 --- a/src/internal/m365/collection/drive/collection_test.go +++ b/src/internal/m365/collection/drive/collection_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata" @@ -256,7 +257,7 @@ func (suite *CollectionUnitSuite) TestCollection() { mt := readItem.(data.ItemModTime) assert.Equal(t, now, mt.ModTime()) - readData, err := io.ReadAll(readItem.ToReader()) + rr, err := readers.NewVersionedRestoreReader(readItem.ToReader()) test.expectErr(t, err) if err != nil { @@ -267,13 +268,25 @@ func (suite *CollectionUnitSuite) TestCollection() { return } + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + + readData, err := io.ReadAll(rr) + require.NoError(t, err, clues.ToCore(err)) + assert.Equal(t, stubItemContent, readData) readItemMeta := readItems[1] assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID()) + rr, err = readers.NewVersionedRestoreReader(readItemMeta.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + readMeta := metadata.Metadata{} - err = json.NewDecoder(readItemMeta.ToReader()).Decode(&readMeta) + err = json.NewDecoder(rr).Decode(&readMeta) require.NoError(t, err, clues.ToCore(err)) metaTD.AssertMetadataEqual(t, stubMeta, readMeta) @@ -485,12 +498,18 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime() for _, i := range readItems { if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) { - content, err := io.ReadAll(i.ToReader()) + rr, err := readers.NewVersionedRestoreReader(i.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + + content, err := io.ReadAll(rr) require.NoError(t, err, clues.ToCore(err)) require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content)) im, ok := i.(data.ItemModTime) - require.Equal(t, ok, true, "modtime interface") + require.True(t, ok, "modtime interface") require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time") } } diff --git a/src/internal/m365/collection/drive/collections.go b/src/internal/m365/collection/drive/collections.go index 40d4d7cd6..7d94156ea 100644 --- a/src/internal/m365/collection/drive/collections.go +++ b/src/internal/m365/collection/drive/collections.go @@ -135,11 +135,6 @@ func deserializeMetadata( continue } - if err == nil { - // Successful decode. 
- continue - } - // This is conservative, but report an error if either any of the items // for any of the deserialized maps have duplicate drive IDs or there's // some other problem deserializing things. This will cause the entire @@ -147,7 +142,9 @@ func deserializeMetadata( // these cases. We can make the logic for deciding when to continue vs. // when to fail less strict in the future if needed. if err != nil { - return nil, nil, false, clues.Stack(err).WithClues(ictx) + errs.Fail(clues.Stack(err).WithClues(ictx)) + + return map[string]string{}, map[string]map[string]string{}, false, nil } } } diff --git a/src/internal/m365/collection/drive/collections_test.go b/src/internal/m365/collection/drive/collections_test.go index 88a8f9a62..1e25d16c0 100644 --- a/src/internal/m365/collection/drive/collections_test.go +++ b/src/internal/m365/collection/drive/collections_test.go @@ -17,6 +17,7 @@ import ( "github.com/alcionai/corso/src/internal/common/prefixmatcher" pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock" "github.com/alcionai/corso/src/internal/data" + dataMock "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" "github.com/alcionai/corso/src/internal/m365/graph" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" @@ -984,7 +985,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { { // Bad formats are logged but skip adding entries to the maps and don't // return an error. - name: "BadFormat", + name: "BadFormat", + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, cols: []func() []graph.MetadataCollectionEntry{ func() []graph.MetadataCollectionEntry { return []graph.MetadataCollectionEntry{ @@ -995,7 +998,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { }, }, canUsePreviousBackup: false, - errCheck: assert.Error, + errCheck: assert.NoError, }, { // Unexpected files are logged and skipped. 
They don't cause an error to @@ -1060,10 +1063,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { } }, }, - expectedDeltas: nil, - expectedPaths: nil, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, canUsePreviousBackup: false, - errCheck: assert.Error, + errCheck: assert.NoError, }, { name: "DriveAlreadyFound_Deltas", @@ -1090,10 +1093,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { } }, }, - expectedDeltas: nil, - expectedPaths: nil, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, canUsePreviousBackup: false, - errCheck: assert.Error, + errCheck: assert.NoError, }, } @@ -1121,7 +1124,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { func(*support.ControllerOperationStatus) {}) require.NoError(t, err, clues.ToCore(err)) - cols = append(cols, data.NoFetchRestoreCollection{Collection: mc}) + cols = append(cols, dataMock.NewUnversionedRestoreCollection( + t, + data.NoFetchRestoreCollection{Collection: mc})) } deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols) @@ -2294,7 +2299,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { func(*support.ControllerOperationStatus) {}) assert.NoError(t, err, "creating metadata collection", clues.ToCore(err)) - prevMetadata := []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: mc}} + prevMetadata := []data.RestoreCollection{ + dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: mc}), + } errs := fault.New(true) delList := prefixmatcher.NewStringSetBuilder() @@ -2321,7 +2328,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { deltas, paths, _, err := deserializeMetadata( ctx, []data.RestoreCollection{ - data.NoFetchRestoreCollection{Collection: baseCol}, + dataMock.NewUnversionedRestoreCollection( + t, + data.NoFetchRestoreCollection{Collection: baseCol}), }) if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) { continue diff --git a/src/internal/m365/collection/drive/metadata/metadata.go b/src/internal/m365/collection/drive/metadata/metadata.go index 06a31d432..7e91a2e5b 100644 --- a/src/internal/m365/collection/drive/metadata/metadata.go +++ b/src/internal/m365/collection/drive/metadata/metadata.go @@ -1,7 +1,6 @@ package metadata import ( - "io" "time" ) @@ -41,17 +40,3 @@ type Metadata struct { Permissions []Permission `json:"permissions,omitempty"` LinkShares []LinkShare `json:"linkShares,omitempty"` } - -type Item struct { - ItemID string - Data io.ReadCloser - Mod time.Time -} - -// Deleted implements an interface function. However, OneDrive items are marked -// as deleted by adding them to the exclude list so this can always return -// false. 
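As an aside, every test change in this patch that swaps a raw io.ReadAll for readers.NewVersionedRestoreReader follows the same consumption pattern. A minimal sketch of that pattern, assuming only the corso packages already imported elsewhere in this diff (readVersionedItem is an illustrative name, not part of the change):

package example

import (
	"io"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/common/readers"
	"github.com/alcionai/corso/src/internal/data"
)

// readVersionedItem strips the serialization-format header from an item's
// reader, rejects unexpected versions, and returns the raw payload bytes.
func readVersionedItem(item data.Item) ([]byte, error) {
	rr, err := readers.NewVersionedRestoreReader(item.ToReader())
	if err != nil {
		return nil, clues.Stack(err)
	}

	// The format header travels with the payload, so callers inspect it
	// before trusting the content.
	if rr.Format().Version != readers.DefaultSerializationVersion {
		return nil, clues.New("unexpected serialization version")
	}

	// Items deleted while the backup was running carry an empty payload.
	if rr.Format().DelInFlight {
		return nil, nil
	}

	return io.ReadAll(rr)
}
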
-func (i *Item) Deleted() bool { return false } -func (i *Item) ID() string { return i.ItemID } -func (i *Item) ToReader() io.ReadCloser { return i.Data } -func (i *Item) ModTime() time.Time { return i.Mod } diff --git a/src/internal/m365/collection/exchange/backup_test.go b/src/internal/m365/collection/exchange/backup_test.go index 4b046fd47..bb6aad27c 100644 --- a/src/internal/m365/collection/exchange/backup_test.go +++ b/src/internal/m365/collection/exchange/backup_test.go @@ -15,7 +15,9 @@ import ( inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" + dataMock "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/operations/inject" @@ -322,7 +324,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { require.NoError(t, err, clues.ToCore(err)) cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{ - data.NoFetchRestoreCollection{Collection: coll}, + dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: coll}), }) test.expectError(t, err, clues.ToCore(err)) @@ -591,7 +593,7 @@ func (suite *BackupIntgSuite) TestDelta() { require.NotNil(t, metadata, "collections contains a metadata collection") cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{ - data.NoFetchRestoreCollection{Collection: metadata}, + dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: metadata}), }) require.NoError(t, err, clues.ToCore(err)) assert.True(t, canUsePreviousBackup, "can use previous backup") @@ -666,7 +668,12 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() { for stream := range streamChannel { buf := &bytes.Buffer{} - read, err := buf.ReadFrom(stream.ToReader()) + rr, err := readers.NewVersionedRestoreReader(stream.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + + read, err := buf.ReadFrom(rr) assert.NoError(t, err, clues.ToCore(err)) assert.NotZero(t, read) @@ -744,7 +751,13 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() { for stream := range edc.Items(ctx, fault.New(true)) { buf := &bytes.Buffer{} - read, err := buf.ReadFrom(stream.ToReader()) + + rr, err := readers.NewVersionedRestoreReader(stream.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + + read, err := buf.ReadFrom(rr) assert.NoError(t, err, clues.ToCore(err)) assert.NotZero(t, read) @@ -878,7 +891,12 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() { for item := range edc.Items(ctx, fault.New(true)) { buf := &bytes.Buffer{} - read, err := buf.ReadFrom(item.ToReader()) + rr, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + + read, err := buf.ReadFrom(rr) assert.NoError(t, err, clues.ToCore(err)) assert.NotZero(t, read) @@ -1198,7 +1216,9 @@ func checkMetadata( ) { catPaths, _, err := ParseMetadataCollections( ctx, - []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}}) + []data.RestoreCollection{ 
+ dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: c}), + }) if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) { return } diff --git a/src/internal/m365/collection/exchange/collection.go b/src/internal/m365/collection/exchange/collection.go index 30f28672d..71b9bb01b 100644 --- a/src/internal/m365/collection/exchange/collection.go +++ b/src/internal/m365/collection/exchange/collection.go @@ -278,10 +278,21 @@ func (col *prefetchCollection) streamItems( return } - stream <- data.NewPrefetchedItem( + item, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(itemData)), id, details.ItemInfo{Exchange: info}) + if err != nil { + el.AddRecoverable( + ctx, + clues.Stack(err). + WithClues(ctx). + Label(fault.LabelForceNoBackupCreation)) + + return + } + + stream <- item atomic.AddInt64(&success, 1) atomic.AddInt64(&totalBytes, info.Size) diff --git a/src/internal/m365/collection/exchange/collection_test.go b/src/internal/m365/collection/exchange/collection_test.go index 5e1665faa..f373bd1a5 100644 --- a/src/internal/m365/collection/exchange/collection_test.go +++ b/src/internal/m365/collection/exchange/collection_test.go @@ -17,6 +17,7 @@ import ( "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/exchange/mock" "github.com/alcionai/corso/src/internal/m365/graph" @@ -55,13 +56,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() { suite.Run(test.name, func() { t := suite.T() - ed := data.NewPrefetchedItem( + ed, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(test.readData)), "itemID", details.ItemInfo{}) + require.NoError(t, err, clues.ToCore(err)) + + r, err := readers.NewVersionedRestoreReader(ed.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) buf := &bytes.Buffer{} - _, err := buf.ReadFrom(ed.ToReader()) + _, err = buf.ReadFrom(r) assert.NoError(t, err, "reading data: %v", clues.ToCore(err)) assert.Equal(t, test.readData, buf.Bytes(), "read data") assert.Equal(t, "itemID", ed.ID(), "item ID") @@ -493,11 +501,11 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() { time.Now(), fault.New(true)) - _, err := li.(data.ItemInfo).Info() + _, err := li.Info() assert.Error(suite.T(), err, "Info without reading data should error") } -func (suite *CollectionUnitSuite) TestLazyItem() { +func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() { var ( parentPath = "inbox/private/silly cats" now = time.Now() @@ -505,44 +513,19 @@ func (suite *CollectionUnitSuite) TestLazyItem() { table := []struct { name string - modTime time.Time getErr error serializeErr error - expectModTime time.Time expectReadErrType error - dataCheck assert.ValueAssertionFunc - expectInfoErr bool - expectInfoErrType error }{ - { - name: "ReturnsEmptyReaderOnDeletedInFlight", - modTime: now, - getErr: graph.ErrDeletedInFlight, - dataCheck: assert.Empty, - expectInfoErr: true, - expectInfoErrType: data.ErrNotFound, - }, - { - name: "ReturnsValidReaderAndInfo", - modTime: now, - dataCheck: assert.NotEmpty, - expectModTime: now, - }, { name: "ReturnsErrorOnGenericGetError", - modTime: now, getErr: assert.AnError, expectReadErrType: assert.AnError, - dataCheck: assert.Empty, - expectInfoErr: true, }, { name: 
"ReturnsErrorOnGenericSerializeError", - modTime: now, serializeErr: assert.AnError, expectReadErrType: assert.AnError, - dataCheck: assert.Empty, - expectInfoErr: true, }, } @@ -575,47 +558,128 @@ func (suite *CollectionUnitSuite) TestLazyItem() { userID: "userID", itemID: "itemID", getter: getter, - modTime: test.modTime, + modTime: now, immutableIDs: false, parentPath: parentPath, }, "itemID", - test.modTime, + now, fault.New(true)) assert.False(t, li.Deleted(), "item shouldn't be marked deleted") - assert.Equal( - t, - test.modTime, - li.(data.ItemModTime).ModTime(), - "item mod time") + assert.Equal(t, now, li.ModTime(), "item mod time") - readData, err := io.ReadAll(li.ToReader()) - if test.expectReadErrType == nil { - assert.NoError(t, err, "reading item data: %v", clues.ToCore(err)) - } else { - assert.ErrorIs(t, err, test.expectReadErrType, "read error") - } - - test.dataCheck(t, readData, "read item data") - - info, err := li.(data.ItemInfo).Info() - - // Didn't expect an error getting info, it should be valid. - if !test.expectInfoErr { - assert.NoError(t, err, "getting item info: %v", clues.ToCore(err)) - assert.Equal(t, parentPath, info.Exchange.ParentPath) - assert.Equal(t, test.expectModTime, info.Modified()) - - return - } + _, err := readers.NewVersionedRestoreReader(li.ToReader()) + assert.ErrorIs(t, err, test.expectReadErrType) // Should get some form of error when trying to get info. + _, err = li.Info() assert.Error(t, err, "Info()") - - if test.expectInfoErrType != nil { - assert.ErrorIs(t, err, test.expectInfoErrType, "Info() error") - } }) } } + +func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlight() { + var ( + t = suite.T() + + parentPath = "inbox/private/silly cats" + now = time.Now() + ) + + ctx, flush := tester.NewContext(t) + defer flush() + + getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight} + + li := data.NewLazyItem( + ctx, + &lazyItemGetter{ + userID: "userID", + itemID: "itemID", + getter: getter, + modTime: now, + immutableIDs: false, + parentPath: parentPath, + }, + "itemID", + now, + fault.New(true)) + + assert.False(t, li.Deleted(), "item shouldn't be marked deleted") + assert.Equal( + t, + now, + li.ModTime(), + "item mod time") + + r, err := readers.NewVersionedRestoreReader(li.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.True(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) + assert.NoError(t, err, "reading item data: %v", clues.ToCore(err)) + + assert.Empty(t, readData, "read item data") + + _, err = li.Info() + assert.ErrorIs(t, err, data.ErrNotFound, "Info() error") +} + +func (suite *CollectionUnitSuite) TestLazyItem() { + var ( + t = suite.T() + + parentPath = "inbox/private/silly cats" + now = time.Now() + ) + + ctx, flush := tester.NewContext(t) + defer flush() + + // Exact data type doesn't really matter. 
+ testData := models.NewMessage() + testData.SetSubject(ptr.To("hello world")) + + getter := &mock.ItemGetSerialize{GetData: testData} + + li := data.NewLazyItem( + ctx, + &lazyItemGetter{ + userID: "userID", + itemID: "itemID", + getter: getter, + modTime: now, + immutableIDs: false, + parentPath: parentPath, + }, + "itemID", + now, + fault.New(true)) + + assert.False(t, li.Deleted(), "item shouldn't be marked deleted") + assert.Equal( + t, + now, + li.ModTime(), + "item mod time") + + r, err := readers.NewVersionedRestoreReader(li.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) + assert.NoError(t, err, "reading item data: %v", clues.ToCore(err)) + + assert.NotEmpty(t, readData, "read item data") + + info, err := li.Info() + assert.NoError(t, err, "getting item info: %v", clues.ToCore(err)) + + assert.Equal(t, parentPath, info.Exchange.ParentPath) + assert.Equal(t, now, info.Modified()) +} diff --git a/src/internal/m365/collection/groups/channel_handler.go b/src/internal/m365/collection/groups/channel_handler.go index 80c36cbef..db50446ca 100644 --- a/src/internal/m365/collection/groups/channel_handler.go +++ b/src/internal/m365/collection/groups/channel_handler.go @@ -67,6 +67,15 @@ func (bh channelsBackupHandler) canonicalPath( false) } +func (bh channelsBackupHandler) PathPrefix(tenantID string) (path.Path, error) { + return path.Build( + tenantID, + bh.protectedResource, + path.GroupsService, + path.ChannelMessagesCategory, + false) +} + func (bh channelsBackupHandler) GetChannelMessage( ctx context.Context, teamID, channelID, itemID string, diff --git a/src/internal/m365/collection/groups/collection.go b/src/internal/m365/collection/groups/collection.go index b8ff3b436..0a1ca7212 100644 --- a/src/internal/m365/collection/groups/collection.go +++ b/src/internal/m365/collection/groups/collection.go @@ -150,27 +150,47 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { parentFolderID, id) if err != nil { - el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer")) + el.AddRecoverable( + ctx, + clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation)) + return } if err := writer.WriteObjectValue("", item); err != nil { - el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer")) + el.AddRecoverable( + ctx, + clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation)) + return } itemData, err := writer.GetSerializedContent() if err != nil { - el.AddRecoverable(ctx, clues.Wrap(err, "serializing channel message")) + el.AddRecoverable( + ctx, + clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation)) + return } info.ParentPath = col.LocationPath().String() - col.stream <- data.NewPrefetchedItem( + storeItem, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(itemData)), id, details.ItemInfo{Groups: info}) + if err != nil { + el.AddRecoverable( + ctx, + clues.Stack(err). + WithClues(ctx). 
+ Label(fault.LabelForceNoBackupCreation)) + + return + } + + col.stream <- storeItem atomic.AddInt64(&streamedItems, 1) atomic.AddInt64(&totalBytes, info.Size) diff --git a/src/internal/m365/collection/groups/collection_test.go b/src/internal/m365/collection/groups/collection_test.go index be4c52dc7..e0bf19d19 100644 --- a/src/internal/m365/collection/groups/collection_test.go +++ b/src/internal/m365/collection/groups/collection_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/groups/mock" "github.com/alcionai/corso/src/internal/m365/support" @@ -48,13 +49,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() { suite.Run(test.name, func() { t := suite.T() - ed := data.NewPrefetchedItem( + ed, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(test.readData)), "itemID", details.ItemInfo{}) + require.NoError(t, err, clues.ToCore(err)) + + r, err := readers.NewVersionedRestoreReader(ed.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) buf := &bytes.Buffer{} - _, err := buf.ReadFrom(ed.ToReader()) + _, err = buf.ReadFrom(r) assert.NoError(t, err, "reading data: %v", clues.ToCore(err)) assert.Equal(t, test.readData, buf.Bytes(), "read data") assert.Equal(t, "itemID", ed.ID(), "item ID") diff --git a/src/internal/m365/collection/site/collection.go b/src/internal/m365/collection/site/collection.go index 95d77acb2..8af643d4b 100644 --- a/src/internal/m365/collection/site/collection.go +++ b/src/internal/m365/collection/site/collection.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "io" - "time" "github.com/alcionai/clues" "github.com/microsoft/kiota-abstractions-go/serialization" @@ -40,12 +39,7 @@ const ( Pages DataCategory = 2 ) -var ( - _ data.BackupCollection = &Collection{} - _ data.Item = &Item{} - _ data.ItemInfo = &Item{} - _ data.ItemModTime = &Item{} -) +var _ data.BackupCollection = &Collection{} // Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported // by the oneDrive.Collection as the calls are identical for populating the Collection @@ -120,43 +114,6 @@ func (sc *Collection) Items( return sc.data } -type Item struct { - id string - data io.ReadCloser - info *details.SharePointInfo - modTime time.Time - - // true if the item was marked by graph as deleted. 
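The collection hunks above all repeat one recoverable-error idiom: item construction can now fail, and failures are recorded on the fault bus with a label that blocks backup-model creation for the run. A condensed sketch of the idiom (emitItem is an illustrative name; the clues and fault calls are the ones this patch already uses):

package example

import (
	"context"
	"io"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/fault"
)

// emitItem records construction failures as recoverable, but labels them so
// that this run won't persist a backup model built from incomplete data.
func emitItem(
	ctx context.Context,
	stream chan<- data.Item,
	rc io.ReadCloser,
	id string,
	info details.ItemInfo,
	errs *fault.Bus,
) {
	item, err := data.NewPrefetchedItem(rc, id, info)
	if err != nil {
		errs.AddRecoverable(ctx, clues.Stack(err).
			WithClues(ctx).
			Label(fault.LabelForceNoBackupCreation))

		return
	}

	stream <- item
}
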
- deleted bool -} - -func NewItem(name string, d io.ReadCloser) *Item { - return &Item{ - id: name, - data: d, - } -} - -func (sd *Item) ID() string { - return sd.id -} - -func (sd *Item) ToReader() io.ReadCloser { - return sd.data -} - -func (sd Item) Deleted() bool { - return sd.deleted -} - -func (sd *Item) Info() (details.ItemInfo, error) { - return details.ItemInfo{SharePoint: sd.info}, nil -} - -func (sd *Item) ModTime() time.Time { - return sd.modTime -} - func (sc *Collection) finishPopulation( ctx context.Context, metrics support.CollectionMetrics, @@ -251,21 +208,20 @@ func (sc *Collection) retrieveLists( size := int64(len(byteArray)) if size > 0 { - t := time.Now() - if t1 := lst.GetLastModifiedDateTime(); t1 != nil { - t = *t1 - } - metrics.Bytes += size metrics.Successes++ - sc.data <- &Item{ - id: ptr.Val(lst.GetId()), - data: io.NopCloser(bytes.NewReader(byteArray)), - info: ListToSPInfo(lst, size), - modTime: t, + + item, err := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + ptr.Val(lst.GetId()), + details.ItemInfo{SharePoint: ListToSPInfo(lst, size)}) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) + continue } + sc.data <- item progress <- struct{}{} } } @@ -322,13 +278,17 @@ func (sc *Collection) retrievePages( if size > 0 { metrics.Bytes += size metrics.Successes++ - sc.data <- &Item{ - id: ptr.Val(pg.GetId()), - data: io.NopCloser(bytes.NewReader(byteArray)), - info: pageToSPInfo(pg, root, size), - modTime: ptr.OrNow(pg.GetLastModifiedDateTime()), + + item, err := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + ptr.Val(pg.GetId()), + details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)}) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) + continue } + sc.data <- item progress <- struct{}{} } } diff --git a/src/internal/m365/collection/site/collection_test.go b/src/internal/m365/collection/site/collection_test.go index 0be5c2dc8..5b53513f0 100644 --- a/src/internal/m365/collection/site/collection_test.go +++ b/src/internal/m365/collection/site/collection_test.go @@ -19,6 +19,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" @@ -58,21 +59,6 @@ func TestSharePointCollectionSuite(t *testing.T) { }) } -func (suite *SharePointCollectionSuite) TestCollection_Item_Read() { - t := suite.T() - m := []byte("test message") - name := "aFile" - sc := &Item{ - id: name, - data: io.NopCloser(bytes.NewReader(m)), - } - readData, err := io.ReadAll(sc.ToReader()) - require.NoError(t, err, clues.ToCore(err)) - - assert.Equal(t, name, sc.id) - assert.Equal(t, readData, m) -} - // TestListCollection tests basic functionality to create // SharePoint collection and to use the data stream channel. 
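For reference, a hedged sketch of the construction that replaces the deleted SharePoint-specific Item struct; buildSPItem is an illustrative name, and the calls mirror the retrieveLists and retrievePages hunks above:

package example

import (
	"bytes"
	"io"

	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/backup/details"
)

// buildSPItem serializes-then-wraps: the already-serialized bytes go into
// the shared prefetched-item type, which also attaches the version header.
func buildSPItem(
	serialized []byte,
	id string,
	info *details.SharePointInfo,
) (data.Item, error) {
	item, err := data.NewPrefetchedItem(
		io.NopCloser(bytes.NewReader(serialized)),
		id,
		details.ItemInfo{SharePoint: info})
	if err != nil {
		return nil, err
	}

	return item, nil
}
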
func (suite *SharePointCollectionSuite) TestCollection_Items() { @@ -88,7 +74,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { name, itemName string scope selectors.SharePointScope getDir func(t *testing.T) path.Path - getItem func(t *testing.T, itemName string) *Item + getItem func(t *testing.T, itemName string) data.Item }{ { name: "List", @@ -106,7 +92,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { return dir }, - getItem: func(t *testing.T, name string) *Item { + getItem: func(t *testing.T, name string) data.Item { ow := kioser.NewJsonSerializationWriter() listing := spMock.ListDefault(name) listing.SetDisplayName(&name) @@ -117,11 +103,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { byteArray, err := ow.GetSerializedContent() require.NoError(t, err, clues.ToCore(err)) - data := &Item{ - id: name, - data: io.NopCloser(bytes.NewReader(byteArray)), - info: ListToSPInfo(listing, int64(len(byteArray))), - } + data, err := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + name, + details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) + require.NoError(t, err, clues.ToCore(err)) return data }, @@ -142,16 +128,16 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { return dir }, - getItem: func(t *testing.T, itemName string) *Item { + getItem: func(t *testing.T, itemName string) data.Item { byteArray := spMock.Page(itemName) page, err := betaAPI.CreatePageFromBytes(byteArray) require.NoError(t, err, clues.ToCore(err)) - data := &Item{ - id: itemName, - data: io.NopCloser(bytes.NewReader(byteArray)), - info: betaAPI.PageInfo(page, int64(len(byteArray))), - } + data, err := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + itemName, + details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))}) + require.NoError(t, err, clues.ToCore(err)) return data }, @@ -210,11 +196,11 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() { byteArray, err := service.Serialize(listing) require.NoError(t, err, clues.ToCore(err)) - listData := &Item{ - id: testName, - data: io.NopCloser(bytes.NewReader(byteArray)), - info: ListToSPInfo(listing, int64(len(byteArray))), - } + listData, err := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + testName, + details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) + require.NoError(t, err, clues.ToCore(err)) destName := testdata.DefaultRestoreConfig("").Location diff --git a/src/internal/m365/controller.go b/src/internal/m365/controller.go index 0bd15ee17..3e0b3af93 100644 --- a/src/internal/m365/controller.go +++ b/src/internal/m365/controller.go @@ -79,20 +79,29 @@ func NewController( return nil, clues.Wrap(err, "creating api client").WithClues(ctx) } - rc := resource.UnknownResource + var rCli *resourceClient - switch pst { - case path.ExchangeService, path.OneDriveService: - rc = resource.Users - case path.GroupsService: - rc = resource.Groups - case path.SharePointService: - rc = resource.Sites - } + // no failure for unknown service. + // In that case we create a controller that doesn't attempt to look up any resource + // data. This case helps avoid unnecessary service calls when the end user is running + // repo init and connect commands via the CLI. All other callers should be expected + // to pass in a known service, or else expect downstream failures. 
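The branch introduced in this hunk reduces to a small service-to-resource mapping, sketched below as a standalone function (resourceFor is an illustrative name, and the path.ServiceType and resource import paths are assumptions inferred from this diff):

package example

import (
	"github.com/alcionai/corso/src/internal/m365/resource"
	"github.com/alcionai/corso/src/pkg/path"
)

// resourceFor restates the mapping the constructor applies: an unknown
// service skips resource-client creation entirely (repo init/connect), and
// each known service selects the category used for protected-resource lookups.
func resourceFor(pst path.ServiceType) resource.Category {
	switch pst {
	case path.ExchangeService, path.OneDriveService:
		return resource.Users
	case path.GroupsService:
		return resource.Groups
	case path.SharePointService:
		return resource.Sites
	default:
		return resource.UnknownResource
	}
}
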
+ if pst != path.UnknownService { + rc := resource.UnknownResource - rCli, err := getResourceClient(rc, ac) - if err != nil { - return nil, clues.Wrap(err, "creating resource client").WithClues(ctx) + switch pst { + case path.ExchangeService, path.OneDriveService: + rc = resource.Users + case path.GroupsService: + rc = resource.Groups + case path.SharePointService: + rc = resource.Sites + } + + rCli, err = getResourceClient(rc, ac) + if err != nil { + return nil, clues.Wrap(err, "creating resource client").WithClues(ctx) + } } ctrl := Controller{ @@ -110,6 +119,10 @@ func NewController( return &ctrl, nil } +func (ctrl *Controller) VerifyAccess(ctx context.Context) error { + return ctrl.AC.Access().GetToken(ctx) +} + // --------------------------------------------------------------------------- // Processing Status // --------------------------------------------------------------------------- @@ -195,7 +208,7 @@ func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, er case resource.Groups: return &resourceClient{enum: rc, getter: ac.Groups()}, nil default: - return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc) + return nil, clues.New("unrecognized owner resource type").With("resource_enum", rc) } } diff --git a/src/internal/m365/controller_test.go b/src/internal/m365/controller_test.go index 3f535880a..d95a56c9f 100644 --- a/src/internal/m365/controller_test.go +++ b/src/internal/m365/controller_test.go @@ -861,7 +861,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() { }, }, { - name: "MultipleContactsSingleFolder", + name: "MultipleContactsInRestoreFolder", service: path.ExchangeService, collections: []stub.ColInfo{ { @@ -887,49 +887,77 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() { }, }, }, - { - name: "MultipleContactsMultipleFolders", - service: path.ExchangeService, - collections: []stub.ColInfo{ - { - PathElements: []string{"Work"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID", - Data: exchMock.ContactBytes("Ghimley"), - LookupKey: "Ghimley", - }, - { - Name: "someencodeditemID2", - Data: exchMock.ContactBytes("Irgot"), - LookupKey: "Irgot", - }, - { - Name: "someencodeditemID3", - Data: exchMock.ContactBytes("Jannes"), - LookupKey: "Jannes", - }, - }, - }, - { - PathElements: []string{"Personal"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID4", - Data: exchMock.ContactBytes("Argon"), - LookupKey: "Argon", - }, - { - Name: "someencodeditemID5", - Data: exchMock.ContactBytes("Bernard"), - LookupKey: "Bernard", - }, - }, - }, - }, - }, + // TODO(ashmrtn): Re-enable when we can restore contacts to nested folders. 
+ //{ + // name: "MultipleContactsSingleFolder", + // service: path.ExchangeService, + // collections: []stub.ColInfo{ + // { + // PathElements: []string{"Contacts"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID", + // Data: exchMock.ContactBytes("Ghimley"), + // LookupKey: "Ghimley", + // }, + // { + // Name: "someencodeditemID2", + // Data: exchMock.ContactBytes("Irgot"), + // LookupKey: "Irgot", + // }, + // { + // Name: "someencodeditemID3", + // Data: exchMock.ContactBytes("Jannes"), + // LookupKey: "Jannes", + // }, + // }, + // }, + // }, + //}, + //{ + // name: "MultipleContactsMultipleFolders", + // service: path.ExchangeService, + // collections: []stub.ColInfo{ + // { + // PathElements: []string{"Work"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID", + // Data: exchMock.ContactBytes("Ghimley"), + // LookupKey: "Ghimley", + // }, + // { + // Name: "someencodeditemID2", + // Data: exchMock.ContactBytes("Irgot"), + // LookupKey: "Irgot", + // }, + // { + // Name: "someencodeditemID3", + // Data: exchMock.ContactBytes("Jannes"), + // LookupKey: "Jannes", + // }, + // }, + // }, + // { + // PathElements: []string{"Personal"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID4", + // Data: exchMock.ContactBytes("Argon"), + // LookupKey: "Argon", + // }, + // { + // Name: "someencodeditemID5", + // Data: exchMock.ContactBytes("Bernard"), + // LookupKey: "Bernard", + // }, + // }, + // }, + // }, + //}, // { // name: "MultipleEventsSingleCalendar", // service: path.ExchangeService, @@ -1017,34 +1045,35 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() { func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() { table := []restoreBackupInfo{ - { - name: "Contacts", - service: path.ExchangeService, - collections: []stub.ColInfo{ - { - PathElements: []string{"Work"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID", - Data: exchMock.ContactBytes("Ghimley"), - LookupKey: "Ghimley", - }, - }, - }, - { - PathElements: []string{"Personal"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID2", - Data: exchMock.ContactBytes("Irgot"), - LookupKey: "Irgot", - }, - }, - }, - }, - }, + // TODO(ashmrtn): Re-enable when we can restore contacts to nested folders. 
+ //{ + // name: "Contacts", + // service: path.ExchangeService, + // collections: []stub.ColInfo{ + // { + // PathElements: []string{"Work"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID", + // Data: exchMock.ContactBytes("Ghimley"), + // LookupKey: "Ghimley", + // }, + // }, + // }, + // { + // PathElements: []string{"Personal"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID2", + // Data: exchMock.ContactBytes("Irgot"), + // LookupKey: "Irgot", + // }, + // }, + // }, + // }, + //}, // { // name: "Events", // service: path.ExchangeService, diff --git a/src/internal/m365/graph/errors.go b/src/internal/m365/graph/errors.go index f5c7824ab..6a758977e 100644 --- a/src/internal/m365/graph/errors.go +++ b/src/internal/m365/graph/errors.go @@ -70,6 +70,7 @@ const ( NoSPLicense errorMessage = "Tenant does not have a SPO license" parameterDeltaTokenNotSupported errorMessage = "Parameter 'DeltaToken' not supported for this request" usersCannotBeResolved errorMessage = "One or more users could not be resolved" + requestedSiteCouldNotBeFound errorMessage = "Requested site could not be found" ) const ( @@ -259,6 +260,10 @@ func IsErrUsersCannotBeResolved(err error) bool { return hasErrorCode(err, noResolvedUsers) || hasErrorMessage(err, usersCannotBeResolved) } +func IsErrSiteNotFound(err error) bool { + return hasErrorMessage(err, requestedSiteCouldNotBeFound) +} + // --------------------------------------------------------------------------- // error parsers // --------------------------------------------------------------------------- diff --git a/src/internal/m365/graph/errors_test.go b/src/internal/m365/graph/errors_test.go index cf9f2f99d..cd0057fda 100644 --- a/src/internal/m365/graph/errors_test.go +++ b/src/internal/m365/graph/errors_test.go @@ -628,6 +628,51 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUsersCannotBeResolved() { } } +func (suite *GraphErrorsUnitSuite) TestIsErrSiteCouldNotBeFound() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "non-matching oDataErr", + err: odErrMsg("InvalidRequest", "cant resolve sites"), + expect: assert.False, + }, + { + name: "matching oDataErr msg", + err: odErrMsg("InvalidRequest", string(requestedSiteCouldNotBeFound)), + expect: assert.True, + }, + // next two tests are to make sure the checks are case insensitive + { + name: "oDataErr uppercase", + err: odErrMsg("InvalidRequest", strings.ToUpper(string(requestedSiteCouldNotBeFound))), + expect: assert.True, + }, + { + name: "oDataErr lowercase", + err: odErrMsg("InvalidRequest", strings.ToLower(string(requestedSiteCouldNotBeFound))), + expect: assert.True, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + test.expect(suite.T(), IsErrSiteNotFound(test.err)) + }) + } +} + func (suite *GraphErrorsUnitSuite) TestIsErrCannotOpenFileAttachment() { table := []struct { name string diff --git a/src/internal/m365/graph/metadata_collection.go b/src/internal/m365/graph/metadata_collection.go index 7b382fe16..1c3d1f766 100644 --- a/src/internal/m365/graph/metadata_collection.go +++ b/src/internal/m365/graph/metadata_collection.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "io" + "time" "github.com/alcionai/clues" @@ -16,7 +17,7 @@ import ( var ( _ data.BackupCollection = 
&MetadataCollection{} - _ data.Item = &MetadataItem{} + _ data.Item = &metadataItem{} ) // MetadataCollection in a simple collection that assumes all items to be @@ -24,7 +25,7 @@ var ( // created. This collection has no logic for lazily fetching item data. type MetadataCollection struct { fullPath path.Path - items []MetadataItem + items []metadataItem statusUpdater support.StatusUpdater } @@ -40,23 +41,34 @@ func NewMetadataEntry(fileName string, mData any) MetadataCollectionEntry { return MetadataCollectionEntry{fileName, mData} } -func (mce MetadataCollectionEntry) toMetadataItem() (MetadataItem, error) { +func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) { if len(mce.fileName) == 0 { - return MetadataItem{}, clues.New("missing metadata filename") + return metadataItem{}, clues.New("missing metadata filename") } if mce.data == nil { - return MetadataItem{}, clues.New("missing metadata") + return metadataItem{}, clues.New("missing metadata") } buf := &bytes.Buffer{} encoder := json.NewEncoder(buf) if err := encoder.Encode(mce.data); err != nil { - return MetadataItem{}, clues.Wrap(err, "serializing metadata") + return metadataItem{}, clues.Wrap(err, "serializing metadata") } - return NewMetadataItem(mce.fileName, buf.Bytes()), nil + item, err := data.NewUnindexedPrefetchedItem( + io.NopCloser(buf), + mce.fileName, + time.Now()) + if err != nil { + return metadataItem{}, clues.Stack(err) + } + + return metadataItem{ + Item: item, + size: int64(buf.Len()), + }, nil } // MakeMetadataCollection creates a metadata collection that has a file @@ -71,7 +83,7 @@ func MakeMetadataCollection( return nil, nil } - items := make([]MetadataItem, 0, len(metadata)) + items := make([]metadataItem, 0, len(metadata)) for _, md := range metadata { item, err := md.toMetadataItem() @@ -89,7 +101,7 @@ func MakeMetadataCollection( func NewMetadataCollection( p path.Path, - items []MetadataItem, + items []metadataItem, statusUpdater support.StatusUpdater, ) *MetadataCollection { return &MetadataCollection{ @@ -148,7 +160,7 @@ func (md MetadataCollection) Items( defer close(res) for _, item := range md.items { - totalBytes += int64(len(item.data)) + totalBytes += item.size res <- item } }() @@ -156,36 +168,7 @@ func (md MetadataCollection) Items( return res } -// MetadataItem is an in-memory data.Item implementation. MetadataItem does -// not implement additional interfaces like data.ItemInfo, so it should only -// be used for items with a small amount of content that don't need to be added -// to backup details. -// -// Currently the expected use-case for this struct are storing metadata for a -// backup like delta tokens or a mapping of container IDs to container paths. -type MetadataItem struct { - // uuid is an ID that can be used to refer to the item. - uuid string - // data is a buffer of data that the item refers to. - data []byte -} - -func NewMetadataItem(uuid string, itemData []byte) MetadataItem { - return MetadataItem{ - uuid: uuid, - data: itemData, - } -} - -func (mi MetadataItem) ID() string { - return mi.uuid -} - -// TODO(ashmrtn): Fill in once we know how to handle this. 
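The toMetadataItem changes above reduce to a short flow: JSON-encode the payload, wrap it in the shared unindexed prefetched item (which prepends the version header), and keep the encoded size so Items() can still tally totalBytes. A sketch under those assumptions (toItem is an illustrative name):

package example

import (
	"bytes"
	"encoding/json"
	"io"
	"time"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/data"
)

// toItem encodes the payload and wraps it in the shared item type. The size
// is captured before the buffer is handed off, so the count is stable even
// if the wrapper consumes the reader.
func toItem(fileName string, payload any) (data.Item, int64, error) {
	buf := &bytes.Buffer{}
	if err := json.NewEncoder(buf).Encode(payload); err != nil {
		return nil, 0, clues.Wrap(err, "serializing metadata")
	}

	size := int64(buf.Len())

	item, err := data.NewUnindexedPrefetchedItem(io.NopCloser(buf), fileName, time.Now())
	if err != nil {
		return nil, 0, clues.Stack(err)
	}

	return item, size, nil
}
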
-func (mi MetadataItem) Deleted() bool { - return false -} - -func (mi MetadataItem) ToReader() io.ReadCloser { - return io.NopCloser(bytes.NewReader(mi.data)) +type metadataItem struct { + data.Item + size int64 } diff --git a/src/internal/m365/graph/metadata_collection_test.go b/src/internal/m365/graph/metadata_collection_test.go index 41e15f0bf..ee9ca6b5c 100644 --- a/src/internal/m365/graph/metadata_collection_test.go +++ b/src/internal/m365/graph/metadata_collection_test.go @@ -1,9 +1,11 @@ package graph import ( + "bytes" "encoding/json" "io" "testing" + "time" "github.com/alcionai/clues" "github.com/google/uuid" @@ -11,6 +13,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/fault" @@ -63,10 +67,21 @@ func (suite *MetadataCollectionUnitSuite) TestItems() { len(itemData), "Requires same number of items and data") - items := []MetadataItem{} + items := []metadataItem{} for i := 0; i < len(itemNames); i++ { - items = append(items, NewMetadataItem(itemNames[i], itemData[i])) + item, err := data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader(itemData[i])), + itemNames[i], + time.Time{}) + require.NoError(t, err, clues.ToCore(err)) + + items = append( + items, + metadataItem{ + Item: item, + size: int64(len(itemData[i])), + }) } p, err := path.Build( @@ -92,7 +107,13 @@ func (suite *MetadataCollectionUnitSuite) TestItems() { for s := range c.Items(ctx, fault.New(true)) { gotNames = append(gotNames, s.ID()) - buf, err := io.ReadAll(s.ToReader()) + rr, err := readers.NewVersionedRestoreReader(s.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + + buf, err := io.ReadAll(rr) if !assert.NoError(t, err, clues.ToCore(err)) { continue } @@ -193,11 +214,17 @@ func (suite *MetadataCollectionUnitSuite) TestMakeMetadataCollection() { for item := range col.Items(ctx, fault.New(true)) { assert.Equal(t, test.metadata.fileName, item.ID()) + rr, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + gotMap := map[string]string{} - decoder := json.NewDecoder(item.ToReader()) + decoder := json.NewDecoder(rr) itemCount++ - err := decoder.Decode(&gotMap) + err = decoder.Decode(&gotMap) if !assert.NoError(t, err, clues.ToCore(err)) { continue } diff --git a/src/internal/m365/helper_test.go b/src/internal/m365/helper_test.go index d6b7c256c..6f3907394 100644 --- a/src/internal/m365/helper_test.go +++ b/src/internal/m365/helper_test.go @@ -16,6 +16,7 @@ import ( "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub" @@ -573,7 +574,12 @@ func compareExchangeEmail( expected map[string][]byte, item data.Item, ) { - itemData, err := io.ReadAll(item.ToReader()) + rr := versionedReadWrapper(t, item.ToReader()) + if rr == nil { + return + } + + itemData, err := 
io.ReadAll(rr)
 if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 return
 }
@@ -600,7 +606,12 @@ func compareExchangeContact(
 expected map[string][]byte,
 item data.Item,
 ) {
- itemData, err := io.ReadAll(item.ToReader())
+ rr := versionedReadWrapper(t, item.ToReader())
+ if rr == nil {
+ return
+ }
+
+ itemData, err := io.ReadAll(rr)
 if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 return
 }
@@ -628,7 +639,12 @@ func compareExchangeEvent(
 expected map[string][]byte,
 item data.Item,
 ) {
- itemData, err := io.ReadAll(item.ToReader())
+ rr := versionedReadWrapper(t, item.ToReader())
+ if rr == nil {
+ return
+ }
+
+ itemData, err := io.ReadAll(rr)
 if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 return
 }
@@ -718,7 +734,12 @@ func compareDriveItem(
 return false
 }
 
- buf, err := io.ReadAll(item.ToReader())
+ rr := versionedReadWrapper(t, item.ToReader())
+ if rr == nil {
+ return true
+ }
+
+ buf, err := io.ReadAll(rr)
 if !assert.NoError(t, err, clues.ToCore(err)) {
 return true
 }
@@ -751,10 +772,6 @@ func compareDriveItem(
 }
 
 if isMeta {
- var itemType *metadata.Item
-
- assert.IsType(t, itemType, item)
-
 var (
 itemMeta metadata.Metadata
 expectedMeta metadata.Metadata
@@ -854,6 +871,29 @@ func compareDriveItem(
 return true
 }
+
+// versionedReadWrapper strips out the version format header and checks that it
+// meets the current standard for all service types. If it doesn't meet the
+// standard, it returns nil. Otherwise it returns the wrapped versioned reader.
+func versionedReadWrapper(
+ t *testing.T,
+ reader io.ReadCloser,
+) io.ReadCloser {
+ rr, err := readers.NewVersionedRestoreReader(reader)
+ if !assert.NoError(t, err, clues.ToCore(err)) {
+ return nil
+ }
+
+ if !assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) {
+ return nil
+ }
+
+ if !assert.False(t, rr.Format().DelInFlight) {
+ return nil
+ }
+
+ return rr
+}
+
 // compareItem compares the data returned by backup with the expected data.
 // Returns true if a comparison was done else false. Bool return is mostly used
 // to exclude OneDrive permissions for the root right now.
@@ -923,30 +963,9 @@ func checkHasCollections(
 continue
 }
 
- fp := g.FullPath()
 loc := g.(data.LocationPather).LocationPath()
 
- if fp.Service() == path.OneDriveService ||
- (fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) {
- dp, err := path.ToDrivePath(fp)
- if !assert.NoError(t, err, clues.ToCore(err)) {
- continue
- }
-
- loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...)
- } - - p, err := loc.ToDataLayerPath( - fp.Tenant(), - fp.ProtectedResource(), - fp.Service(), - fp.Category(), - false) - if !assert.NoError(t, err, clues.ToCore(err)) { - continue - } - - gotNames = append(gotNames, p.String()) + gotNames = append(gotNames, loc.String()) } assert.ElementsMatch(t, expectedNames, gotNames, "returned collections") @@ -967,14 +986,18 @@ func checkCollections( for _, returned := range got { var ( - hasItems bool - service = returned.FullPath().Service() - category = returned.FullPath().Category() - expectedColData = expected[returned.FullPath().String()] - folders = returned.FullPath().Elements() - rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location + expectedColDataByLoc map[string][]byte + hasItems bool + service = returned.FullPath().Service() + category = returned.FullPath().Category() + folders = returned.FullPath().Elements() + rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location ) + if p, ok := returned.(data.LocationPather); ok { + expectedColDataByLoc = expected[p.LocationPath().String()] + } + // Need to iterate through all items even if we don't expect to find a match // because otherwise we'll deadlock waiting for the status. Unexpected or // missing collection paths will be reported by checkHasCollections. @@ -994,14 +1017,14 @@ func checkCollections( hasItems = true gotItems++ - if expectedColData == nil { + if expectedColDataByLoc == nil { continue } if !compareItem( t, returned.FullPath(), - expectedColData, + expectedColDataByLoc, service, category, item, diff --git a/src/internal/m365/restore.go b/src/internal/m365/restore.go index 616fd6f2b..d6237c072 100644 --- a/src/internal/m365/restore.go +++ b/src/internal/m365/restore.go @@ -84,6 +84,7 @@ func (ctrl *Controller) ConsumeRestoreCollections( rcc, ctrl.AC, ctrl.backupDriveIDNames, + ctrl.backupSiteIDWebURL, dcs, deets, errs, diff --git a/src/internal/m365/service/groups/backup.go b/src/internal/m365/service/groups/backup.go index 27f34f7b3..25210ade3 100644 --- a/src/internal/m365/service/groups/backup.go +++ b/src/internal/m365/service/groups/backup.go @@ -22,6 +22,7 @@ import ( "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/identity" "github.com/alcionai/corso/src/pkg/backup/metadata" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" @@ -35,19 +36,18 @@ func ProduceBackupCollections( creds account.M365Config, su support.StatusUpdater, errs *fault.Bus, -) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) { +) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) { b, err := bpc.Selector.ToGroupsBackup() if err != nil { - return nil, nil, false, clues.Wrap(err, "groupsDataCollection: parsing selector") + return nil, nil, clues.Wrap(err, "groupsDataCollection: parsing selector") } var ( - el = errs.Local() - collections = []data.BackupCollection{} - categories = map[path.CategoryType]struct{}{} - ssmb = prefixmatcher.NewStringSetBuilder() - canUsePreviousBackup bool - sitesPreviousPaths = map[string]string{} + el = errs.Local() + collections = []data.BackupCollection{} + categories = map[path.CategoryType]struct{}{} + ssmb = prefixmatcher.NewStringSetBuilder() + sitesPreviousPaths = map[string]string{} ) ctx = clues.Add( @@ -55,9 +55,12 @@ func ProduceBackupCollections( "group_id", clues.Hide(bpc.ProtectedResource.ID()), "group_name", clues.Hide(bpc.ProtectedResource.Name())) - 
group, err := ac.Groups().GetByID(ctx, bpc.ProtectedResource.ID()) + group, err := ac.Groups().GetByID( + ctx, + bpc.ProtectedResource.ID(), + api.CallConfig{}) if err != nil { - return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx) + return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx) } isTeam := api.IsTeam(ctx, group) @@ -76,12 +79,9 @@ func ProduceBackupCollections( switch scope.Category().PathType() { case path.LibrariesCategory: - // TODO(meain): Private channels get a separate SharePoint - // site. We should also back those up and not just the - // default one. - resp, err := ac.Groups().GetRootSite(ctx, bpc.ProtectedResource.ID()) + sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs) if err != nil { - return nil, nil, false, err + return nil, nil, err } siteMetadataCollection := map[string][]data.RestoreCollection{} @@ -92,39 +92,47 @@ func ProduceBackupCollections( siteMetadataCollection[siteID] = append(siteMetadataCollection[siteID], c) } - pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName())) - sbpc := inject.BackupProducerConfig{ - LastBackupVersion: bpc.LastBackupVersion, - Options: bpc.Options, - ProtectedResource: pr, - Selector: bpc.Selector, - MetadataCollections: siteMetadataCollection[ptr.Val(resp.GetId())], - } + for _, s := range sites { + pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetName())) + sbpc := inject.BackupProducerConfig{ + LastBackupVersion: bpc.LastBackupVersion, + Options: bpc.Options, + ProtectedResource: pr, + Selector: bpc.Selector, + MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())], + } - bh := drive.NewGroupBackupHandler( - bpc.ProtectedResource.ID(), - ptr.Val(resp.GetId()), - ac.Drives(), - scope) + bh := drive.NewGroupBackupHandler( + bpc.ProtectedResource.ID(), + ptr.Val(s.GetId()), + ac.Drives(), + scope) - cp, err := bh.SitePathPrefix(creds.AzureTenantID) - if err != nil { - return nil, nil, false, clues.Wrap(err, "getting canonical path") - } + sp, err := bh.SitePathPrefix(creds.AzureTenantID) + if err != nil { + return nil, nil, clues.Wrap(err, "getting site path") + } - sitesPreviousPaths[ptr.Val(resp.GetId())] = cp.String() + sitesPreviousPaths[ptr.Val(s.GetId())] = sp.String() - dbcs, canUsePreviousBackup, err = site.CollectLibraries( - ctx, - sbpc, - bh, - creds.AzureTenantID, - ssmb, - su, - errs) - if err != nil { - el.AddRecoverable(ctx, err) - continue + cs, canUsePreviousBackup, err := site.CollectLibraries( + ctx, + sbpc, + bh, + creds.AzureTenantID, + ssmb, + su, + errs) + if err != nil { + el.AddRecoverable(ctx, err) + continue + } + + if !canUsePreviousBackup { + dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{})) + } + + dbcs = append(dbcs, cs...) 
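Both the per-site loop above and the channel-messages case that follows apply the same fallback, which is what lets ProduceBackupCollections in backup.go report canUsePreviousBackup as always true for groups: an unusable previous backup yields a tombstone at the scope's path prefix, so the stale subtree is dropped rather than merged. A sketch of the pattern (withTombstoneFallback is an illustrative name):

package example

import (
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/path"
)

// withTombstoneFallback prepends a tombstone collection when the previous
// backup's metadata can't be used, then appends the freshly produced
// collections. The caller can therefore report the previous backup as
// usable unconditionally.
func withTombstoneFallback(
	cs []data.BackupCollection,
	canUsePrevious bool,
	prefix path.Path,
) []data.BackupCollection {
	out := make([]data.BackupCollection, 0, len(cs)+1)

	if !canUsePrevious {
		out = append(out, data.NewTombstoneCollection(prefix, control.Options{}))
	}

	return append(out, cs...)
}
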
} case path.ChannelMessagesCategory: @@ -132,10 +140,12 @@ func ProduceBackupCollections( continue } - dbcs, canUsePreviousBackup, err = groups.CreateCollections( + bh := groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()) + + cs, canUsePreviousBackup, err := groups.CreateCollections( ctx, bpc, - groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()), + bh, creds.AzureTenantID, scope, su, @@ -144,6 +154,17 @@ func ProduceBackupCollections( el.AddRecoverable(ctx, err) continue } + + if !canUsePreviousBackup { + tp, err := bh.PathPrefix(creds.AzureTenantID) + if err != nil { + return nil, nil, clues.Wrap(err, "getting message path") + } + + dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{})) + } + + dbcs = append(dbcs, cs...) } collections = append(collections, dbcs...) @@ -162,7 +183,7 @@ func ProduceBackupCollections( su, errs) if err != nil { - return nil, nil, false, err + return nil, nil, err } collections = append(collections, baseCols...) @@ -175,12 +196,12 @@ func ProduceBackupCollections( sitesPreviousPaths, su) if err != nil { - return nil, nil, false, err + return nil, nil, err } collections = append(collections, md) - return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure() + return collections, ssmb.ToReader(), el.Failure() } func getSitesMetadataCollection( diff --git a/src/internal/m365/service/groups/enabled.go b/src/internal/m365/service/groups/enabled.go index 87acc8c48..4580746e5 100644 --- a/src/internal/m365/service/groups/enabled.go +++ b/src/internal/m365/service/groups/enabled.go @@ -7,18 +7,15 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/alcionai/corso/src/pkg/filters" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) -type getByIDer interface { - GetByID(ctx context.Context, identifier string) (models.Groupable, error) -} - func IsServiceEnabled( ctx context.Context, - gbi getByIDer, + gbi api.GetByIDer[models.Groupable], resource string, ) (bool, error) { - resp, err := gbi.GetByID(ctx, resource) + resp, err := gbi.GetByID(ctx, resource, api.CallConfig{}) if err != nil { return false, clues.Wrap(err, "getting group").WithClues(ctx) } diff --git a/src/internal/m365/service/groups/enabled_test.go b/src/internal/m365/service/groups/enabled_test.go index c2447982e..d032be415 100644 --- a/src/internal/m365/service/groups/enabled_test.go +++ b/src/internal/m365/service/groups/enabled_test.go @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type EnabledUnitSuite struct { @@ -22,14 +23,18 @@ func TestEnabledUnitSuite(t *testing.T) { suite.Run(t, &EnabledUnitSuite{Suite: tester.NewUnitSuite(t)}) } -var _ getByIDer = mockGBI{} +var _ api.GetByIDer[models.Groupable] = mockGBI{} type mockGBI struct { group models.Groupable err error } -func (m mockGBI) GetByID(ctx context.Context, identifier string) (models.Groupable, error) { +func (m mockGBI) GetByID( + ctx context.Context, + identifier string, + _ api.CallConfig, +) (models.Groupable, error) { return m.group, m.err } @@ -56,13 +61,13 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() { table := []struct { name string - mock func(context.Context) getByIDer + mock func(context.Context) api.GetByIDer[models.Groupable] expect assert.BoolAssertionFunc expectErr assert.ErrorAssertionFunc }{ { name: "ok", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx 
context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ group: unified, } @@ -72,7 +77,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() { }, { name: "non-unified group", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ group: nonUnified, } @@ -82,7 +87,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() { }, { name: "group not found", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ err: graph.Stack(ctx, odErrMsg(string(graph.RequestResourceNotFound), "message")), } @@ -92,7 +97,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() { }, { name: "arbitrary error", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ err: assert.AnError, } diff --git a/src/internal/m365/service/groups/restore.go b/src/internal/m365/service/groups/restore.go index 9a94a921b..fc09088e4 100644 --- a/src/internal/m365/service/groups/restore.go +++ b/src/internal/m365/service/groups/restore.go @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/drive" + "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/pkg/backup/details" @@ -29,24 +30,20 @@ func ConsumeRestoreCollections( rcc inject.RestoreConsumerConfig, ac api.Client, backupDriveIDNames idname.Cacher, + backupSiteIDWebURL idname.Cacher, dcs []data.RestoreCollection, deets *details.Builder, errs *fault.Bus, ctr *count.Bus, ) (*support.ControllerOperationStatus, error) { var ( - restoreMetrics support.CollectionMetrics - caches = drive.NewRestoreCaches(backupDriveIDNames) - lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService()) - el = errs.Local() + restoreMetrics support.CollectionMetrics + caches = drive.NewRestoreCaches(backupDriveIDNames) + lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService()) + el = errs.Local() + webURLToSiteNames = map[string]string{} ) - // TODO: uncomment when a handler is available - // err := caches.Populate(ctx, lrh, rcc.ProtectedResource.ID()) - // if err != nil { - // return nil, clues.Wrap(err, "initializing restore caches") - // } - // Reorder collections so that the parents directories are created // before the child directories; a requirement for permissions. data.SortRestoreCollections(dcs) @@ -59,7 +56,7 @@ func ConsumeRestoreCollections( var ( err error - resp models.Siteable + siteName string category = dc.FullPath().Category() metrics support.CollectionMetrics ictx = clues.Add(ctx, @@ -71,16 +68,25 @@ func ConsumeRestoreCollections( switch dc.FullPath().Category() { case path.LibrariesCategory: - // TODO(meain): As of now we only restore the root site - // and that too to whatever is currently the root site of the - // group and not the original one. Not sure if the - // original can be changed. 
- resp, err = ac.Groups().GetRootSite(ctx, rcc.ProtectedResource.ID())
- if err != nil {
- return nil, err
+ siteID := dc.FullPath().Folders()[1]
+
+ webURL, ok := backupSiteIDWebURL.NameOf(siteID)
+ if !ok {
+ // This should not happen, but just in case.
+ logger.Ctx(ctx).With("site_id", siteID).Info("site weburl not found, using site id")
 }
- pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName()))
+ siteName, err = getSiteName(ctx, siteID, webURL, ac.Sites(), webURLToSiteNames)
+ if err != nil {
+ el.AddRecoverable(ctx, clues.Wrap(err, "getting site").
+ With("web_url", webURL, "site_id", siteID))
+ } else if len(siteName) == 0 {
+ // Site was deleted between the backup and the restore,
+ // and is no longer available.
+ continue
+ }
+
+ pr := idname.NewProvider(siteID, siteName)
 srcc := inject.RestoreConsumerConfig{
 BackupVersion: rcc.BackupVersion,
 Options: rcc.Options,
@@ -133,3 +139,38 @@ func ConsumeRestoreCollections(
 return status, el.Failure()
 }
+
+func getSiteName(
+ ctx context.Context,
+ siteID string,
+ webURL string,
+ ac api.GetByIDer[models.Siteable],
+ webURLToSiteNames map[string]string,
+) (string, error) {
+ siteName, ok := webURLToSiteNames[webURL]
+ if ok {
+ return siteName, nil
+ }
+
+ site, err := ac.GetByID(ctx, siteID, api.CallConfig{})
+ if err != nil {
+ webURLToSiteNames[webURL] = ""
+
+ if graph.IsErrSiteNotFound(err) {
+ // TODO(meain): Should we surface this to the user somehow?
+ // In case a site that we had previously backed up was
+ // deleted, skip that site with a warning.
+ logger.Ctx(ctx).With("web_url", webURL, "site_id", siteID).
+ Info("Site does not exist, skipping restore.")
+
+ return "", nil
+ }
+
+ return "", err
+ }
+
+ siteName = ptr.Val(site.GetDisplayName())
+ webURLToSiteNames[webURL] = siteName
+
+ return siteName, nil
+}
diff --git a/src/internal/m365/service/groups/restore_test.go b/src/internal/m365/service/groups/restore_test.go
index d87000fc5..262bc3159 100644
--- a/src/internal/m365/service/groups/restore_test.go
+++ b/src/internal/m365/service/groups/restore_test.go
@@ -7,12 +7,17 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"
+ "golang.org/x/exp/slices"
 "github.com/alcionai/corso/src/internal/common/idname"
+ "github.com/alcionai/corso/src/internal/common/ptr"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/data/mock"
+ "github.com/alcionai/corso/src/internal/m365/graph"
 "github.com/alcionai/corso/src/internal/operations/inject"
 "github.com/alcionai/corso/src/internal/tester"
+ "github.com/alcionai/corso/src/internal/tester/tconfig"
+ "github.com/alcionai/corso/src/pkg/control"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/path"
 "github.com/alcionai/corso/src/pkg/services/m365/api"
@@ -52,9 +57,118 @@ func (suite *GroupsUnitSuite) TestConsumeRestoreCollections_noErrorOnGroups() {
 rcc,
 api.Client{},
 idname.NewCache(map[string]string{}),
+ idname.NewCache(map[string]string{}),
 dcs,
 nil,
 fault.New(false),
 nil)
 assert.NoError(t, err, "Groups Channels restore")
 }
+
+type groupsIntegrationSuite struct {
+ tester.Suite
+ resource string
+ tenantID string
+ ac api.Client
+}
+
+func TestGroupsIntegrationSuite(t *testing.T) {
+ suite.Run(t, &groupsIntegrationSuite{
+ Suite: tester.NewIntegrationSuite(
+ t,
+ [][]string{tconfig.M365AcctCredEnvs}),
+ })
+}
+
+func (suite *groupsIntegrationSuite) SetupSuite() {
+ t := suite.T()
+
+ ctx, flush := tester.NewContext(t)
+ defer flush()
+
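+ // (Editor's note, illustrative: the limiter below appears to throttle
+ // concurrent Graph API calls for this suite; the exact parameter
+ // semantics are an assumption, not confirmed by this change.)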
+ graph.InitializeConcurrencyLimiter(ctx, true, 4)
+
+ suite.resource = tconfig.M365TeamID(t)
+
+ acct := tconfig.NewM365Account(t)
+ creds, err := acct.M365Config()
+ require.NoError(t, err, clues.ToCore(err))
+
+ suite.ac, err = api.NewClient(creds, control.DefaultOptions())
+ require.NoError(t, err, clues.ToCore(err))
+
+ suite.tenantID = creds.AzureTenantID
+}
+
+// test for getSiteName
+func (suite *groupsIntegrationSuite) TestGetSiteName() {
+ t := suite.T()
+
+ ctx, flush := tester.NewContext(t)
+ defer flush()
+
+ rootSite, err := suite.ac.Groups().GetRootSite(ctx, suite.resource)
+ require.NoError(t, err, clues.ToCore(err))
+
+ // Generate a fake site ID that appears valid to graph API but doesn't actually exist.
+ // This "could" be flaky, but it's highly unlikely.
+ unavailableSiteID := []rune(ptr.Val(rootSite.GetId()))
+ firstIDChar := slices.Index(unavailableSiteID, ',') + 1
+
+ if unavailableSiteID[firstIDChar] != '2' {
+ unavailableSiteID[firstIDChar] = '2'
+ } else {
+ unavailableSiteID[firstIDChar] = '1'
+ }
+
+ tests := []struct {
+ name string
+ siteID string
+ webURL string
+ siteName string
+ webURLToSiteNames map[string]string
+ expectErr assert.ErrorAssertionFunc
+ }{
+ {
+ name: "valid",
+ siteID: ptr.Val(rootSite.GetId()),
+ webURL: ptr.Val(rootSite.GetWebUrl()),
+ siteName: *rootSite.GetDisplayName(),
+ webURLToSiteNames: map[string]string{},
+ expectErr: assert.NoError,
+ },
+ {
+ name: "unavailable",
+ siteID: string(unavailableSiteID),
+ webURL: "https://does-not-matter",
+ siteName: "",
+ webURLToSiteNames: map[string]string{},
+ expectErr: assert.NoError,
+ },
+ {
+ name: "previously found",
+ siteID: "random-id",
+ webURL: "https://random-url",
+ siteName: "random-name",
+ webURLToSiteNames: map[string]string{"https://random-url": "random-name"},
+ expectErr: assert.NoError,
+ },
+ }
+
+ for _, test := range tests {
+ suite.Run(test.name, func() {
+ t := suite.T()
+
+ siteName, err := getSiteName(
+ ctx,
+ test.siteID,
+ test.webURL,
+ suite.ac.Sites(),
+ test.webURLToSiteNames)
+
+ test.expectErr(t, err, clues.ToCore(err))
+ assert.Equal(t, test.siteName, siteName)
+ })
+ }
+}
diff --git a/src/internal/m365/service/sharepoint/api/pages_test.go b/src/internal/m365/service/sharepoint/api/pages_test.go
index a834f10ea..792e3eda0 100644
--- a/src/internal/m365/service/sharepoint/api/pages_test.go
+++ b/src/internal/m365/service/sharepoint/api/pages_test.go
@@ -4,13 +4,14 @@ import (
 "bytes"
 "io"
 "testing"
+ "time"
 "github.com/alcionai/clues"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"
- "github.com/alcionai/corso/src/internal/m365/collection/site"
+ "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/m365/graph"
 "github.com/alcionai/corso/src/internal/m365/service/sharepoint/api"
 spMock "github.com/alcionai/corso/src/internal/m365/service/sharepoint/mock"
@@ -108,9 +109,11 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
 //nolint:lll
 byteArray := spMock.Page("Byte Test")
- pageData := site.NewItem(
+ pageData, err := data.NewUnindexedPrefetchedItem(
+ io.NopCloser(bytes.NewReader(byteArray)),
 testName,
- io.NopCloser(bytes.NewReader(byteArray)))
+ time.Now())
+ require.NoError(t, err, clues.ToCore(err))
 info, err := api.RestoreSitePage(
 ctx,
diff --git a/src/internal/m365/stub/stub.go b/src/internal/m365/stub/stub.go
index 49f27716c..b0c0104a1 100644
--- a/src/internal/m365/stub/stub.go
+++ b/src/internal/m365/stub/stub.go
@@ -4,6
+4,7 @@ import ( "bytes" "io" + "github.com/alcionai/clues" "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/data" @@ -163,28 +164,29 @@ func CollectionsForInfo( func backupOutputPathFromRestore( restoreCfg control.RestoreConfig, inputPath path.Path, -) (path.Path, error) { +) (*path.Builder, error) { base := []string{restoreCfg.Location} + folders := inputPath.Folders() + switch inputPath.Service() { // OneDrive has leading information like the drive ID. - if inputPath.Service() == path.OneDriveService || inputPath.Service() == path.SharePointService { - folders := inputPath.Folders() - base = append(append([]string{}, folders[:3]...), restoreCfg.Location) + case path.OneDriveService, path.SharePointService: + p, err := path.ToDrivePath(inputPath) + if err != nil { + return nil, clues.Stack(err) + } - if len(folders) > 3 { - base = append(base, folders[3:]...) + // Remove driveID, root, etc. + folders = p.Folders + // Re-add root, but it needs to be in front of the restore folder. + base = append([]string{p.Root}, base...) + + // Currently contacts restore doesn't have nested folders. + case path.ExchangeService: + if inputPath.Category() == path.ContactsCategory { + folders = nil } } - if inputPath.Service() == path.ExchangeService && inputPath.Category() == path.EmailCategory { - base = append(base, inputPath.Folders()...) - } - - return path.Build( - inputPath.Tenant(), - inputPath.ProtectedResource(), - inputPath.Service(), - inputPath.Category(), - false, - base...) + return path.Builder{}.Append(append(base, folders...)...), nil } diff --git a/src/internal/operations/test/onedrive_test.go b/src/internal/operations/test/onedrive_test.go index 8b4ac9b81..6e53566c9 100644 --- a/src/internal/operations/test/onedrive_test.go +++ b/src/internal/operations/test/onedrive_test.go @@ -762,11 +762,10 @@ func runDriveIncrementalTest( true) // do some additional checks to ensure the incremental dealt with fewer items. - // +2 on read/writes to account for metadata: 1 delta and 1 path. var ( - expectWrites = test.itemsWritten + 2 + expectWrites = test.itemsWritten expectNonMetaWrites = test.nonMetaItemsWritten - expectReads = test.itemsRead + 2 + expectReads = test.itemsRead assertReadWrite = assert.Equal ) @@ -775,6 +774,17 @@ func runDriveIncrementalTest( // /libraries/sites/previouspath expectWrites++ expectReads++ + + // +2 on read/writes to account for metadata: 1 delta and 1 path (for each site) + sites, err := ac.Groups().GetAllSites(ctx, owner, fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + + expectWrites += len(sites) * 2 + expectReads += len(sites) * 2 + } else { + // +2 on read/writes to account for metadata: 1 delta and 1 path. 
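+ // (Editor's worked example, illustrative: a group with 3 sites takes the
+ // branch above and adds 3*2 = 6 metadata reads and writes, plus one more
+ // of each for the sites/previouspath file; every other service falls
+ // through to here and adds exactly 2.)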
+ expectWrites += 2 + expectReads += 2 } // Sharepoint can produce a superset of permissions by nature of diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index 35a3b9706..9246a9325 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "io" + "time" "github.com/alcionai/clues" @@ -128,7 +129,7 @@ type streamCollection struct { // folderPath indicates what level in the hierarchy this collection // represents folderPath path.Path - item *streamItem + item data.Item } func (dc *streamCollection) FullPath() path.Path { @@ -157,27 +158,6 @@ func (dc *streamCollection) Items(context.Context, *fault.Bus) <-chan data.Item return items } -// --------------------------------------------------------------------------- -// item -// --------------------------------------------------------------------------- - -type streamItem struct { - name string - data []byte -} - -func (di *streamItem) ID() string { - return di.name -} - -func (di *streamItem) ToReader() io.ReadCloser { - return io.NopCloser(bytes.NewReader(di.data)) -} - -func (di *streamItem) Deleted() bool { - return false -} - // --------------------------------------------------------------------------- // common reader/writer/deleter // --------------------------------------------------------------------------- @@ -202,12 +182,17 @@ func collect( return nil, clues.Wrap(err, "marshalling body").WithClues(ctx) } + item, err := data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader(bs)), + col.itemName, + time.Now()) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + dc := streamCollection{ folderPath: p, - item: &streamItem{ - name: col.itemName, - data: bs, - }, + item: item, } return &dc, nil diff --git a/src/pkg/path/category_type.go b/src/pkg/path/category_type.go index b8c20020f..c403e3c19 100644 --- a/src/pkg/path/category_type.go +++ b/src/pkg/path/category_type.go @@ -96,14 +96,10 @@ var serviceCategories = map[ServiceType]map[CategoryType]struct{}{ ChannelMessagesCategory: {}, LibrariesCategory: {}, }, - TeamsService: { - ChannelMessagesCategory: {}, - LibrariesCategory: {}, - }, } func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, error) { - service := toServiceType(s) + service := ToServiceType(s) if service == UnknownService { return UnknownService, UnknownCategory, clues.Stack(ErrorUnknownService).With("service", fmt.Sprintf("%q", s)) } diff --git a/src/pkg/path/service_category_test.go b/src/pkg/path/service_category_test.go index d2b19b244..2d98ed49c 100644 --- a/src/pkg/path/service_category_test.go +++ b/src/pkg/path/service_category_test.go @@ -157,7 +157,7 @@ func (suite *ServiceCategoryUnitSuite) TestToServiceType() { suite.Run(test.name, func() { t := suite.T() - assert.Equal(t, test.expected, toServiceType(test.service)) + assert.Equal(t, test.expected, ToServiceType(test.service)) }) } } diff --git a/src/pkg/path/service_type.go b/src/pkg/path/service_type.go index a4a99ec6c..9059615a2 100644 --- a/src/pkg/path/service_type.go +++ b/src/pkg/path/service_type.go @@ -15,27 +15,25 @@ var ErrorUnknownService = clues.New("unknown service string") // Metadata services are not considered valid service types for resource paths // though they can be used for metadata paths. 
//
-// The order of the enums below can be changed, but the string representation of
-// each enum must remain the same or migration code needs to be added to handle
-// changes to the string format.
+// The string representation of each enum _must remain the same_. In case of
+// changes to those values, we'll need migration code to handle transitions
+// across states, or we'll get marshalling/unmarshalling errors.
 type ServiceType int
 //go:generate stringer -type=ServiceType -linecomment
 const (
 UnknownService ServiceType = 0
- ExchangeService ServiceType = 1 // exchange
- OneDriveService ServiceType = 2 // onedrive
- SharePointService ServiceType = 3 // sharepoint
- ExchangeMetadataService ServiceType = 4 // exchangeMetadata
- OneDriveMetadataService ServiceType = 5 // onedriveMetadata
- SharePointMetadataService ServiceType = 6 // sharepointMetadata
- GroupsService ServiceType = 7 // groups
- GroupsMetadataService ServiceType = 8 // groupsMetadata
- TeamsService ServiceType = 9 // teams
- TeamsMetadataService ServiceType = 10 // teamsMetadata
+ ExchangeService ServiceType = 1 // exchange
+ OneDriveService ServiceType = 2 // onedrive
+ SharePointService ServiceType = 3 // sharepoint
+ ExchangeMetadataService ServiceType = 4 // exchangeMetadata
+ OneDriveMetadataService ServiceType = 5 // onedriveMetadata
+ SharePointMetadataService ServiceType = 6 // sharepointMetadata
+ GroupsService ServiceType = 7 // groups
+ GroupsMetadataService ServiceType = 8 // groupsMetadata
 )
-func toServiceType(service string) ServiceType {
+func ToServiceType(service string) ServiceType {
 s := strings.ToLower(service)
 switch s {
@@ -47,8 +45,6 @@ func toServiceType(service string) ServiceType {
 return SharePointService
 case strings.ToLower(GroupsService.String()):
 return GroupsService
- case strings.ToLower(TeamsService.String()):
- return TeamsService
 case strings.ToLower(ExchangeMetadataService.String()):
 return ExchangeMetadataService
 case strings.ToLower(OneDriveMetadataService.String()):
@@ -57,8 +53,6 @@ func toServiceType(service string) ServiceType {
 return SharePointMetadataService
 case strings.ToLower(GroupsMetadataService.String()):
 return GroupsMetadataService
- case strings.ToLower(TeamsMetadataService.String()):
- return TeamsMetadataService
 default:
 return UnknownService
 }
diff --git a/src/pkg/path/servicetype_string.go b/src/pkg/path/servicetype_string.go
index 4b9ab16ec..6fa499364 100644
--- a/src/pkg/path/servicetype_string.go
+++ b/src/pkg/path/servicetype_string.go
@@ -17,13 +17,11 @@ func _() {
 _ = x[SharePointMetadataService-6]
 _ = x[GroupsService-7]
 _ = x[GroupsMetadataService-8]
- _ = x[TeamsService-9]
- _ = x[TeamsMetadataService-10]
 }
-const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadatateamsteamsMetadata"
+const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadata"
-var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110, 115, 128}
+var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110}
 func (i ServiceType) String() string {
 if i < 0 || i >= ServiceType(len(_ServiceType_index)-1) {
diff --git a/src/pkg/repository/backups.go b/src/pkg/repository/backups.go
new file mode 100644
index 000000000..a4314eb01
--- /dev/null
+++ b/src/pkg/repository/backups.go
@@ -0,0 +1,359 @@
+package repository
+
+import (
+ "context"
+
+ "github.com/alcionai/clues"
"github.com/kopia/kopia/repo/manifest" + "github.com/pkg/errors" + + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/internal/operations" + "github.com/alcionai/corso/src/internal/streamstore" + "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/store" +) + +// BackupGetter deals with retrieving metadata about backups from the +// repository. +type BackupGetter interface { + Backup(ctx context.Context, id string) (*backup.Backup, error) + Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) + BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) + GetBackupDetails( + ctx context.Context, + backupID string, + ) (*details.Details, *backup.Backup, *fault.Bus) + GetBackupErrors( + ctx context.Context, + backupID string, + ) (*fault.Errors, *backup.Backup, *fault.Bus) +} + +type Backuper interface { + NewBackup( + ctx context.Context, + self selectors.Selector, + ) (operations.BackupOperation, error) + NewBackupWithLookup( + ctx context.Context, + self selectors.Selector, + ins idname.Cacher, + ) (operations.BackupOperation, error) + DeleteBackups( + ctx context.Context, + failOnMissing bool, + ids ...string, + ) error +} + +// NewBackup generates a BackupOperation runner. +func (r repository) NewBackup( + ctx context.Context, + sel selectors.Selector, +) (operations.BackupOperation, error) { + return r.NewBackupWithLookup(ctx, sel, nil) +} + +// NewBackupWithLookup generates a BackupOperation runner. +// ownerIDToName and ownerNameToID are optional populations, in case the caller has +// already generated those values. +func (r repository) NewBackupWithLookup( + ctx context.Context, + sel selectors.Selector, + ins idname.Cacher, +) (operations.BackupOperation, error) { + err := r.ConnectDataProvider(ctx, sel.PathService()) + if err != nil { + return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365") + } + + ownerID, ownerName, err := r.Provider.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins) + if err != nil { + return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details") + } + + // TODO: retrieve display name from gc + sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName) + + return operations.NewBackupOperation( + ctx, + r.Opts, + r.dataLayer, + store.NewWrapper(r.modelStore), + r.Provider, + r.Account, + sel, + sel, // the selector acts as an IDNamer for its discrete resource owner. + r.Bus) +} + +// Backup retrieves a backup by id. +func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) { + return getBackup(ctx, id, store.NewWrapper(r.modelStore)) +} + +// getBackup handles the processing for Backup. +func getBackup( + ctx context.Context, + id string, + sw store.BackupGetter, +) (*backup.Backup, error) { + b, err := sw.GetBackup(ctx, model.StableID(id)) + if err != nil { + return nil, errWrapper(err) + } + + return b, nil +} + +// Backups lists backups by ID. Returns as many backups as possible with +// errors for the backups it was unable to retrieve. 
+func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) {
+ var (
+ bups []*backup.Backup
+ errs = fault.New(false)
+ sw = store.NewWrapper(r.modelStore)
+ )
+
+ for _, id := range ids {
+ ictx := clues.Add(ctx, "backup_id", id)
+
+ b, err := sw.GetBackup(ictx, model.StableID(id))
+ if err != nil {
+ errs.AddRecoverable(ctx, errWrapper(err))
+ }
+
+ bups = append(bups, b)
+ }
+
+ return bups, errs
+}
+
+// BackupsByTag lists all backups in a repository that contain all the tags
+// specified.
+func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) {
+ sw := store.NewWrapper(r.modelStore)
+ return backupsByTag(ctx, sw, fs)
+}
+
+// backupsByTag returns all backups matching all provided tags.
+//
+// TODO(ashmrtn): This exists mostly for testing, but we could restructure the
+// code in this file so there's a more elegant mocking solution.
+func backupsByTag(
+ ctx context.Context,
+ sw store.BackupWrapper,
+ fs []store.FilterOption,
+) ([]*backup.Backup, error) {
+ bs, err := sw.GetBackups(ctx, fs...)
+ if err != nil {
+ return nil, clues.Stack(err)
+ }
+
+ // Filter out assist backup bases as they're considered incomplete and we
+ // haven't been displaying them before now.
+ res := make([]*backup.Backup, 0, len(bs))
+
+ for _, b := range bs {
+ if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup {
+ res = append(res, b)
+ }
+ }
+
+ return res, nil
+}
+
+// GetBackupDetails returns the specified backup.Details
+func (r repository) GetBackupDetails(
+ ctx context.Context,
+ backupID string,
+) (*details.Details, *backup.Backup, *fault.Bus) {
+ errs := fault.New(false)
+
+ deets, bup, err := getBackupDetails(
+ ctx,
+ backupID,
+ r.Account.ID(),
+ r.dataLayer,
+ store.NewWrapper(r.modelStore),
+ errs)
+
+ return deets, bup, errs.Fail(err)
+}
+
+// getBackupDetails handles the processing for GetBackupDetails.
+func getBackupDetails(
+ ctx context.Context,
+ backupID, tenantID string,
+ kw *kopia.Wrapper,
+ sw store.BackupGetter,
+ errs *fault.Bus,
+) (*details.Details, *backup.Backup, error) {
+ b, err := sw.GetBackup(ctx, model.StableID(backupID))
+ if err != nil {
+ return nil, nil, errWrapper(err)
+ }
+
+ ssid := b.StreamStoreID
+ if len(ssid) == 0 {
+ ssid = b.DetailsID
+ }
+
+ if len(ssid) == 0 {
+ return nil, b, clues.New("no streamstore id in backup").WithClues(ctx)
+ }
+
+ var (
+ sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
+ deets details.Details
+ )
+
+ err = sstore.Read(
+ ctx,
+ ssid,
+ streamstore.DetailsReader(details.UnmarshalTo(&deets)),
+ errs)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Retroactively fill in isMeta information for items in older
+ // backup versions without that info.
+ // version.Restore2 introduces the IsMeta flag, so only v1 needs a check.
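+ // (Editor's note, illustrative: the guard below covers backups whose
+ // version sits in [OneDrive1DataAndMetaFiles, OneDrive3IsMetaMarker),
+ // i.e. backups that wrote .meta sidecar files before the IsMeta flag
+ // existed, so the flag is derived from the RepoRef suffix instead.)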
+ if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker {
+ for _, d := range deets.Entries {
+ if d.OneDrive != nil {
+ d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef)
+ }
+ }
+ }
+
+ deets.DetailsModel = deets.FilterMetaFiles()
+
+ return &deets, b, nil
+}
+
+// GetBackupErrors returns the specified backup's fault.Errors
+func (r repository) GetBackupErrors(
+ ctx context.Context,
+ backupID string,
+) (*fault.Errors, *backup.Backup, *fault.Bus) {
+ errs := fault.New(false)
+
+ fe, bup, err := getBackupErrors(
+ ctx,
+ backupID,
+ r.Account.ID(),
+ r.dataLayer,
+ store.NewWrapper(r.modelStore),
+ errs)
+
+ return fe, bup, errs.Fail(err)
+}
+
+// getBackupErrors handles the processing for GetBackupErrors.
+func getBackupErrors(
+ ctx context.Context,
+ backupID, tenantID string,
+ kw *kopia.Wrapper,
+ sw store.BackupGetter,
+ errs *fault.Bus,
+) (*fault.Errors, *backup.Backup, error) {
+ b, err := sw.GetBackup(ctx, model.StableID(backupID))
+ if err != nil {
+ return nil, nil, errWrapper(err)
+ }
+
+ ssid := b.StreamStoreID
+ if len(ssid) == 0 {
+ return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx)
+ }
+
+ var (
+ sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
+ fe fault.Errors
+ )
+
+ err = sstore.Read(
+ ctx,
+ ssid,
+ streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)),
+ errs)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &fe, b, nil
+}
+
+// DeleteBackups removes the backups from both the model store and the backup
+// storage.
+//
+// If failOnMissing is true then returns an error if a backup model can't be
+// found. Otherwise ignores missing backup models.
+//
+// Missing models or snapshots during the actual deletion do not cause errors.
+//
+// All backups are deleted as an atomic unit so any failures will result in no
+// deletions.
+func (r repository) DeleteBackups(
+ ctx context.Context,
+ failOnMissing bool,
+ ids ...string,
+) error {
+ return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...)
+}
+
+// deleteBackups handles the processing for backup deletion.
+func deleteBackups(
+ ctx context.Context,
+ sw store.BackupGetterModelDeleter,
+ failOnMissing bool,
+ ids ...string,
+) error {
+ // Although we haven't explicitly stated it, snapshots are technically
+ // manifests in kopia. This means we can use the same delete API to remove
+ // them and backup models. Deleting all of them together gives us both
+ // atomicity guarantees (around when data will be flushed) and helps reduce
+ // the number of manifest blobs that kopia will create.
+ var toDelete []manifest.ID
+
+ for _, id := range ids {
+ b, err := sw.GetBackup(ctx, model.StableID(id))
+ if err != nil {
+ if !failOnMissing && errors.Is(err, data.ErrNotFound) {
+ continue
+ }
+
+ return clues.Stack(errWrapper(err)).
+ WithClues(ctx).
+ With("delete_backup_id", id)
+ }
+
+ toDelete = append(toDelete, b.ModelStoreID)
+
+ if len(b.SnapshotID) > 0 {
+ toDelete = append(toDelete, manifest.ID(b.SnapshotID))
+ }
+
+ ssid := b.StreamStoreID
+ if len(ssid) == 0 {
+ ssid = b.DetailsID
+ }
+
+ if len(ssid) > 0 {
+ toDelete = append(toDelete, manifest.ID(ssid))
+ }
+ }
+
+ return sw.DeleteWithModelStoreIDs(ctx, toDelete...)
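+ // (Editor's note on the call above, illustrative: passing every manifest
+ // ID in one DeleteWithModelStoreIDs call is what gives DeleteBackups its
+ // all-or-nothing behavior; issuing per-backup deletes inside the loop
+ // would lose that guarantee.)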
+}
diff --git a/src/pkg/repository/data_providers.go b/src/pkg/repository/data_providers.go
new file mode 100644
index 000000000..f95f85b56
--- /dev/null
+++ b/src/pkg/repository/data_providers.go
@@ -0,0 +1,88 @@
+package repository
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/alcionai/clues"
+
+ "github.com/alcionai/corso/src/internal/m365"
+ "github.com/alcionai/corso/src/internal/observe"
+ "github.com/alcionai/corso/src/internal/operations/inject"
+ "github.com/alcionai/corso/src/pkg/account"
+ "github.com/alcionai/corso/src/pkg/path"
+)
+
+type DataProvider interface {
+ inject.BackupProducer
+ inject.ExportConsumer
+ inject.RestoreConsumer
+
+ VerifyAccess(ctx context.Context) error
+}
+
+type DataProviderConnector interface {
+ // ConnectDataProvider initializes configurations
+ // and establishes the client connection with the
+ // data provider for this operation.
+ ConnectDataProvider(
+ ctx context.Context,
+ pst path.ServiceType,
+ ) error
+}
+
+func (r *repository) ConnectDataProvider(
+ ctx context.Context,
+ pst path.ServiceType,
+) error {
+ var (
+ provider DataProvider
+ err error
+ )
+
+ switch r.Account.Provider {
+ case account.ProviderM365:
+ provider, err = connectToM365(ctx, *r, pst)
+ default:
+ err = clues.New("unrecognized provider").WithClues(ctx)
+ }
+
+ if err != nil {
+ return clues.Wrap(err, "connecting data provider")
+ }
+
+ if err := provider.VerifyAccess(ctx); err != nil {
+ return clues.Wrap(err, fmt.Sprintf("verifying %s account connection", r.Account.Provider))
+ }
+
+ r.Provider = provider
+
+ return nil
+}
+
+func connectToM365(
+ ctx context.Context,
+ r repository,
+ pst path.ServiceType,
+) (*m365.Controller, error) {
+ if r.Provider != nil {
+ ctrl, ok := r.Provider.(*m365.Controller)
+ if !ok {
+ // if the provider is initialized to a non-m365 controller, we should not
+ // attempt to connect to m365 afterward.
+ return nil, clues.New("attempted to connect to multiple data providers")
+ }
+
+ return ctrl, nil
+ }
+
+ progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365")
+ defer close(progressBar)
+
+ ctrl, err := m365.NewController(ctx, r.Account, pst, r.Opts)
+ if err != nil {
+ return nil, clues.Wrap(err, "creating m365 client controller")
+ }
+
+ return ctrl, nil
+}
diff --git a/src/pkg/repository/exports.go b/src/pkg/repository/exports.go
new file mode 100644
index 000000000..2aadd2bfb
--- /dev/null
+++ b/src/pkg/repository/exports.go
@@ -0,0 +1,40 @@
+package repository
+
+import (
+ "context"
+
+ "github.com/alcionai/corso/src/internal/model"
+ "github.com/alcionai/corso/src/internal/operations"
+ "github.com/alcionai/corso/src/pkg/control"
+ "github.com/alcionai/corso/src/pkg/selectors"
+ "github.com/alcionai/corso/src/pkg/store"
+)
+
+type Exporter interface {
+ NewExport(
+ ctx context.Context,
+ backupID string,
+ sel selectors.Selector,
+ exportCfg control.ExportConfig,
+ ) (operations.ExportOperation, error)
+}
+
+// NewExport generates an exportOperation runner.
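+//
+// Illustrative usage (editor's sketch; backupID, sel, and exportCfg stand in
+// for caller-supplied values):
+//
+// op, err := r.NewExport(ctx, backupID, sel, exportCfg)
+// if err != nil {
+// // handle error before running the operation
+// }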
+func (r repository) NewExport( + ctx context.Context, + backupID string, + sel selectors.Selector, + exportCfg control.ExportConfig, +) (operations.ExportOperation, error) { + return operations.NewExportOperation( + ctx, + r.Opts, + r.dataLayer, + store.NewWrapper(r.modelStore), + r.Provider, + r.Account, + model.StableID(backupID), + sel, + exportCfg, + r.Bus) +} diff --git a/src/pkg/repository/loadtest/repository_load_test.go b/src/pkg/repository/loadtest/repository_load_test.go index 9cfc38ffc..d65cb21e1 100644 --- a/src/pkg/repository/loadtest/repository_load_test.go +++ b/src/pkg/repository/loadtest/repository_load_test.go @@ -21,7 +21,6 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" @@ -111,7 +110,7 @@ func initM365Repo(t *testing.T) ( repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, repository.InitConfig{}) require.NoError(t, err, clues.ToCore(err)) return ctx, r, ac, st diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index 85abe7c34..283af8e56 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -6,31 +6,20 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" - "github.com/kopia/kopia/repo/manifest" "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/common/crash" - "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/events" "github.com/alcionai/corso/src/internal/kopia" - "github.com/alcionai/corso/src/internal/m365" - "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/operations" - "github.com/alcionai/corso/src/internal/streamstore" - "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/account" - "github.com/alcionai/corso/src/pkg/backup" - "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" - "github.com/alcionai/corso/src/pkg/count" - "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" - "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/storage" "github.com/alcionai/corso/src/pkg/store" ) @@ -42,48 +31,24 @@ var ( ErrorBackupNotFound = clues.New("no backup exists with that id") ) -// BackupGetter deals with retrieving metadata about backups from the -// repository. 
-type BackupGetter interface { - Backup(ctx context.Context, id string) (*backup.Backup, error) - Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) - BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) - GetBackupDetails( - ctx context.Context, - backupID string, - ) (*details.Details, *backup.Backup, *fault.Bus) - GetBackupErrors( - ctx context.Context, - backupID string, - ) (*fault.Errors, *backup.Backup, *fault.Bus) -} - type Repositoryer interface { - Initialize(ctx context.Context, retentionOpts ctrlRepo.Retention) error - Connect(ctx context.Context) error + Backuper + BackupGetter + Restorer + Exporter + DataProviderConnector + + Initialize( + ctx context.Context, + cfg InitConfig, + ) error + Connect( + ctx context.Context, + cfg ConnConfig, + ) error GetID() string Close(context.Context) error - NewBackup( - ctx context.Context, - self selectors.Selector, - ) (operations.BackupOperation, error) - NewBackupWithLookup( - ctx context.Context, - self selectors.Selector, - ins idname.Cacher, - ) (operations.BackupOperation, error) - NewRestore( - ctx context.Context, - backupID string, - sel selectors.Selector, - restoreCfg control.RestoreConfig, - ) (operations.RestoreOperation, error) - NewExport( - ctx context.Context, - backupID string, - sel selectors.Selector, - exportCfg control.ExportConfig, - ) (operations.ExportOperation, error) + NewMaintenance( ctx context.Context, mOpts ctrlRepo.Maintenance, @@ -92,14 +57,6 @@ type Repositoryer interface { ctx context.Context, rcOpts ctrlRepo.Retention, ) (operations.RetentionConfigOperation, error) - DeleteBackups(ctx context.Context, failOnMissing bool, ids ...string) error - BackupGetter - // ConnectToM365 establishes graph api connections - // and initializes api client configurations. - ConnectToM365( - ctx context.Context, - pst path.ServiceType, - ) (*m365.Controller, error) } // Repository contains storage provider information. 
@@ -108,9 +65,10 @@ type repository struct { CreatedAt time.Time Version string // in case of future breaking changes - Account account.Account // the user's m365 account connection details - Storage storage.Storage // the storage provider details and configuration - Opts control.Options + Account account.Account // the user's m365 account connection details + Storage storage.Storage // the storage provider details and configuration + Opts control.Options + Provider DataProvider // the client controller used for external user data CRUD Bus events.Eventer dataLayer *kopia.Wrapper @@ -125,7 +83,7 @@ func (r repository) GetID() string { func New( ctx context.Context, acct account.Account, - s storage.Storage, + st storage.Storage, opts control.Options, configFileRepoID string, ) (singleRepo *repository, err error) { @@ -133,16 +91,16 @@ func New( ctx, "acct_provider", acct.Provider.String(), "acct_id", clues.Hide(acct.ID()), - "storage_provider", s.Provider.String()) + "storage_provider", st.Provider.String()) - bus, err := events.NewBus(ctx, s, acct.ID(), opts) + bus, err := events.NewBus(ctx, st, acct.ID(), opts) if err != nil { return nil, clues.Wrap(err, "constructing event bus").WithClues(ctx) } repoID := configFileRepoID if len(configFileRepoID) == 0 { - repoID = newRepoID(s) + repoID = newRepoID(st) } bus.SetRepoID(repoID) @@ -151,7 +109,7 @@ func New( ID: repoID, Version: "v1", Account: acct, - Storage: s, + Storage: st, Bus: bus, Opts: opts, } @@ -163,17 +121,22 @@ func New( return &r, nil } +type InitConfig struct { + // tells the data provider which service to + // use for its connection pattern. Optional. + Service path.ServiceType + RetentionOpts ctrlRepo.Retention +} + // Initialize will: -// - validate the m365 account & secrets // - connect to the m365 account to ensure communication capability -// - validate the provider config & secrets // - initialize the kopia repo with the provider and retention parameters // - update maintenance retention parameters as needed // - store the configuration details // - connect to the provider func (r *repository) Initialize( ctx context.Context, - retentionOpts ctrlRepo.Retention, + cfg InitConfig, ) (err error) { ctx = clues.Add( ctx, @@ -187,8 +150,14 @@ func (r *repository) Initialize( } }() + if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil { + return clues.Stack(err) + } + + observe.Message(ctx, "Initializing repository") + kopiaRef := kopia.NewConn(r.Storage) - if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts); err != nil { + if err := kopiaRef.Initialize(ctx, r.Opts.Repo, cfg.RetentionOpts); err != nil { // replace common internal errors so that sdk users can check results with errors.Is() if errors.Is(err, kopia.ErrorRepoAlreadyExists) { return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx) @@ -219,12 +188,21 @@ func (r *repository) Initialize( return nil } +type ConnConfig struct { + // tells the data provider which service to + // use for its connection pattern. Leave empty + // to skip the provider connection. 
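+ // (Editor's illustration: e.g. path.ExchangeService pre-connects the
+ // provider for Exchange-shaped operations.)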
+ Service path.ServiceType +} + // Connect will: -// - validate the m365 account details -// - connect to the m365 account to ensure communication capability +// - connect to the m365 account // - connect to the provider storage // - return the connected repository -func (r *repository) Connect(ctx context.Context) (err error) { +func (r *repository) Connect( + ctx context.Context, + cfg ConnConfig, +) (err error) { ctx = clues.Add( ctx, "acct_provider", r.Account.Provider.String(), @@ -237,8 +215,11 @@ func (r *repository) Connect(ctx context.Context) (err error) { } }() - progressBar := observe.MessageWithCompletion(ctx, "Connecting to repository") - defer close(progressBar) + if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil { + return clues.Stack(err) + } + + observe.Message(ctx, "Connecting to repository") kopiaRef := kopia.NewConn(r.Storage) if err := kopiaRef.Connect(ctx, r.Opts.Repo); err != nil { @@ -296,14 +277,13 @@ func (r *repository) UpdatePassword(ctx context.Context, password string) (err e return clues.Wrap(err, "connecting kopia client") } - if err := kopiaRef.UpdatePassword(ctx, password, r.Opts.Repo); err != nil { + err = kopiaRef.UpdatePassword(ctx, password, r.Opts.Repo) + if err != nil { return clues.Wrap(err, "updating on kopia") } defer kopiaRef.Close(ctx) - r.Bus.Event(ctx, events.RepoUpdate, nil) - return nil } @@ -331,98 +311,6 @@ func (r *repository) Close(ctx context.Context) error { return nil } -// NewBackup generates a BackupOperation runner. -func (r repository) NewBackup( - ctx context.Context, - sel selectors.Selector, -) (operations.BackupOperation, error) { - return r.NewBackupWithLookup(ctx, sel, nil) -} - -// NewBackupWithLookup generates a BackupOperation runner. -// ownerIDToName and ownerNameToID are optional populations, in case the caller has -// already generated those values. -func (r repository) NewBackupWithLookup( - ctx context.Context, - sel selectors.Selector, - ins idname.Cacher, -) (operations.BackupOperation, error) { - ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts) - if err != nil { - return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365") - } - - ownerID, ownerName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins) - if err != nil { - return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details") - } - - // TODO: retrieve display name from gc - sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName) - - return operations.NewBackupOperation( - ctx, - r.Opts, - r.dataLayer, - store.NewWrapper(r.modelStore), - ctrl, - r.Account, - sel, - sel, // the selector acts as an IDNamer for its discrete resource owner. - r.Bus) -} - -// NewExport generates a exportOperation runner. -func (r repository) NewExport( - ctx context.Context, - backupID string, - sel selectors.Selector, - exportCfg control.ExportConfig, -) (operations.ExportOperation, error) { - ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts) - if err != nil { - return operations.ExportOperation{}, clues.Wrap(err, "connecting to m365") - } - - return operations.NewExportOperation( - ctx, - r.Opts, - r.dataLayer, - store.NewWrapper(r.modelStore), - ctrl, - r.Account, - model.StableID(backupID), - sel, - exportCfg, - r.Bus) -} - -// NewRestore generates a restoreOperation runner. 
-func (r repository) NewRestore( - ctx context.Context, - backupID string, - sel selectors.Selector, - restoreCfg control.RestoreConfig, -) (operations.RestoreOperation, error) { - ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts) - if err != nil { - return operations.RestoreOperation{}, clues.Wrap(err, "connecting to m365") - } - - return operations.NewRestoreOperation( - ctx, - r.Opts, - r.dataLayer, - store.NewWrapper(r.modelStore), - ctrl, - r.Account, - model.StableID(backupID), - sel, - restoreCfg, - r.Bus, - count.New()) -} - func (r repository) NewMaintenance( ctx context.Context, mOpts ctrlRepo.Maintenance, @@ -448,280 +336,6 @@ func (r repository) NewRetentionConfig( r.Bus) } -// Backup retrieves a backup by id. -func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) { - return getBackup(ctx, id, store.NewWrapper(r.modelStore)) -} - -// getBackup handles the processing for Backup. -func getBackup( - ctx context.Context, - id string, - sw store.BackupGetter, -) (*backup.Backup, error) { - b, err := sw.GetBackup(ctx, model.StableID(id)) - if err != nil { - return nil, errWrapper(err) - } - - return b, nil -} - -// Backups lists backups by ID. Returns as many backups as possible with -// errors for the backups it was unable to retrieve. -func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) { - var ( - bups []*backup.Backup - errs = fault.New(false) - sw = store.NewWrapper(r.modelStore) - ) - - for _, id := range ids { - ictx := clues.Add(ctx, "backup_id", id) - - b, err := sw.GetBackup(ictx, model.StableID(id)) - if err != nil { - errs.AddRecoverable(ctx, errWrapper(err)) - } - - bups = append(bups, b) - } - - return bups, errs -} - -// BackupsByTag lists all backups in a repository that contain all the tags -// specified. -func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) { - sw := store.NewWrapper(r.modelStore) - return backupsByTag(ctx, sw, fs) -} - -// backupsByTag returns all backups matching all provided tags. -// -// TODO(ashmrtn): This exists mostly for testing, but we could restructure the -// code in this file so there's a more elegant mocking solution. -func backupsByTag( - ctx context.Context, - sw store.BackupWrapper, - fs []store.FilterOption, -) ([]*backup.Backup, error) { - bs, err := sw.GetBackups(ctx, fs...) - if err != nil { - return nil, clues.Stack(err) - } - - // Filter out assist backup bases as they're considered incomplete and we - // haven't been displaying them before now. - res := make([]*backup.Backup, 0, len(bs)) - - for _, b := range bs { - if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup { - res = append(res, b) - } - } - - return res, nil -} - -// BackupDetails returns the specified backup.Details -func (r repository) GetBackupDetails( - ctx context.Context, - backupID string, -) (*details.Details, *backup.Backup, *fault.Bus) { - errs := fault.New(false) - - deets, bup, err := getBackupDetails( - ctx, - backupID, - r.Account.ID(), - r.dataLayer, - store.NewWrapper(r.modelStore), - errs) - - return deets, bup, errs.Fail(err) -} - -// getBackupDetails handles the processing for GetBackupDetails. 
-func getBackupDetails( - ctx context.Context, - backupID, tenantID string, - kw *kopia.Wrapper, - sw store.BackupGetter, - errs *fault.Bus, -) (*details.Details, *backup.Backup, error) { - b, err := sw.GetBackup(ctx, model.StableID(backupID)) - if err != nil { - return nil, nil, errWrapper(err) - } - - ssid := b.StreamStoreID - if len(ssid) == 0 { - ssid = b.DetailsID - } - - if len(ssid) == 0 { - return nil, b, clues.New("no streamstore id in backup").WithClues(ctx) - } - - var ( - sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService()) - deets details.Details - ) - - err = sstore.Read( - ctx, - ssid, - streamstore.DetailsReader(details.UnmarshalTo(&deets)), - errs) - if err != nil { - return nil, nil, err - } - - // Retroactively fill in isMeta information for items in older - // backup versions without that info - // version.Restore2 introduces the IsMeta flag, so only v1 needs a check. - if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker { - for _, d := range deets.Entries { - if d.OneDrive != nil { - d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef) - } - } - } - - deets.DetailsModel = deets.FilterMetaFiles() - - return &deets, b, nil -} - -// BackupErrors returns the specified backup's fault.Errors -func (r repository) GetBackupErrors( - ctx context.Context, - backupID string, -) (*fault.Errors, *backup.Backup, *fault.Bus) { - errs := fault.New(false) - - fe, bup, err := getBackupErrors( - ctx, - backupID, - r.Account.ID(), - r.dataLayer, - store.NewWrapper(r.modelStore), - errs) - - return fe, bup, errs.Fail(err) -} - -// getBackupErrors handles the processing for GetBackupErrors. -func getBackupErrors( - ctx context.Context, - backupID, tenantID string, - kw *kopia.Wrapper, - sw store.BackupGetter, - errs *fault.Bus, -) (*fault.Errors, *backup.Backup, error) { - b, err := sw.GetBackup(ctx, model.StableID(backupID)) - if err != nil { - return nil, nil, errWrapper(err) - } - - ssid := b.StreamStoreID - if len(ssid) == 0 { - return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx) - } - - var ( - sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService()) - fe fault.Errors - ) - - err = sstore.Read( - ctx, - ssid, - streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)), - errs) - if err != nil { - return nil, nil, err - } - - return &fe, b, nil -} - -// DeleteBackups removes the backups from both the model store and the backup -// storage. -// -// If failOnMissing is true then returns an error if a backup model can't be -// found. Otherwise ignores missing backup models. -// -// Missing models or snapshots during the actual deletion do not cause errors. -// -// All backups are delete as an atomic unit so any failures will result in no -// deletions. -func (r repository) DeleteBackups( - ctx context.Context, - failOnMissing bool, - ids ...string, -) error { - return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...) -} - -// deleteBackup handles the processing for backup deletion. -func deleteBackups( - ctx context.Context, - sw store.BackupGetterModelDeleter, - failOnMissing bool, - ids ...string, -) error { - // Although we haven't explicitly stated it, snapshots are technically - // manifests in kopia. This means we can use the same delete API to remove - // them and backup models. Deleting all of them together gives us both - // atomicity guarantees (around when data will be flushed) and helps reduce - // the number of manifest blobs that kopia will create. 
- var toDelete []manifest.ID - - for _, id := range ids { - b, err := sw.GetBackup(ctx, model.StableID(id)) - if err != nil { - if !failOnMissing && errors.Is(err, data.ErrNotFound) { - continue - } - - return clues.Stack(errWrapper(err)). - WithClues(ctx). - With("delete_backup_id", id) - } - - toDelete = append(toDelete, b.ModelStoreID) - - if len(b.SnapshotID) > 0 { - toDelete = append(toDelete, manifest.ID(b.SnapshotID)) - } - - ssid := b.StreamStoreID - if len(ssid) == 0 { - ssid = b.DetailsID - } - - if len(ssid) > 0 { - toDelete = append(toDelete, manifest.ID(ssid)) - } - } - - return sw.DeleteWithModelStoreIDs(ctx, toDelete...) -} - -func (r repository) ConnectToM365( - ctx context.Context, - pst path.ServiceType, -) (*m365.Controller, error) { - ctrl, err := connectToM365(ctx, pst, r.Account, r.Opts) - if err != nil { - return nil, clues.Wrap(err, "connecting to m365") - } - - return ctrl, nil -} - // --------------------------------------------------------------------------- // Repository ID Model // --------------------------------------------------------------------------- @@ -770,29 +384,6 @@ func newRepoID(s storage.Storage) string { // helpers // --------------------------------------------------------------------------- -var m365nonce bool - -func connectToM365( - ctx context.Context, - pst path.ServiceType, - acct account.Account, - co control.Options, -) (*m365.Controller, error) { - if !m365nonce { - m365nonce = true - - progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365") - defer close(progressBar) - } - - ctrl, err := m365.NewController(ctx, acct, pst, co) - if err != nil { - return nil, err - } - - return ctrl, nil -} - func errWrapper(err error) error { if errors.Is(err, data.ErrNotFound) { return clues.Stack(ErrorBackupNotFound, err) diff --git a/src/pkg/repository/repository_test.go b/src/pkg/repository/repository_test.go index 83c60fc77..b76f2bb2c 100644 --- a/src/pkg/repository/repository_test.go +++ b/src/pkg/repository/repository_test.go @@ -17,6 +17,7 @@ import ( ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/extensions" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/storage" @@ -69,7 +70,7 @@ func (suite *RepositoryUnitSuite) TestInitialize() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) test.errCheck(t, err, clues.ToCore(err)) }) } @@ -85,12 +86,12 @@ func (suite *RepositoryUnitSuite) TestConnect() { errCheck assert.ErrorAssertionFunc }{ { - storage.ProviderUnknown.String(), - func() (storage.Storage, error) { + name: storage.ProviderUnknown.String(), + storage: func() (storage.Storage, error) { return storage.NewStorage(storage.ProviderUnknown) }, - account.Account{}, - assert.Error, + account: account.Account{}, + errCheck: assert.Error, }, } for _, test := range table { @@ -111,7 +112,7 @@ func (suite *RepositoryUnitSuite) TestConnect() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) test.errCheck(t, err, clues.ToCore(err)) }) } @@ -136,12 +137,13 @@ func TestRepositoryIntegrationSuite(t *testing.T) { func (suite *RepositoryIntegrationSuite) TestInitialize() { table := []struct { name string - account account.Account + account 
func(*testing.T) account.Account storage func(tester.TestT) storage.Storage errCheck assert.ErrorAssertionFunc }{ { name: "success", + account: tconfig.NewM365Account, storage: storeTD.NewPrefixedS3Storage, errCheck: assert.NoError, }, @@ -156,13 +158,13 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() { st := test.storage(t) r, err := New( ctx, - test.account, + test.account(t), st, control.DefaultOptions(), NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) if err == nil { defer func() { err := r.Close(ctx) @@ -204,7 +206,7 @@ func (suite *RepositoryIntegrationSuite) TestInitializeWithRole() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err) defer func() { @@ -218,21 +220,23 @@ func (suite *RepositoryIntegrationSuite) TestConnect() { ctx, flush := tester.NewContext(t) defer flush() + acct := tconfig.NewM365Account(t) + // need to initialize the repository before we can test connecting to it. st := storeTD.NewPrefixedS3Storage(t) r, err := New( ctx, - account.Account{}, + acct, st, control.DefaultOptions(), NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) // now re-connect - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) assert.NoError(t, err, clues.ToCore(err)) } @@ -242,29 +246,36 @@ func (suite *RepositoryIntegrationSuite) TestRepository_UpdatePassword() { ctx, flush := tester.NewContext(t) defer flush() + acct := tconfig.NewM365Account(t) + // need to initialize the repository before we can test connecting to it. st := storeTD.NewPrefixedS3Storage(t) r, err := New( ctx, - account.Account{}, + acct, st, control.DefaultOptions(), NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) // now re-connect - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) assert.NoError(t, err, clues.ToCore(err)) err = r.UpdatePassword(ctx, "newpass") require.NoError(t, err, clues.ToCore(err)) + tmp := st.Config["common_corsoPassphrase"] + st.Config["common_corsoPassphrase"] = "newpass" + // now reconnect with new pass - err = r.Connect(ctx) - assert.Error(t, err, clues.ToCore(err)) + err = r.Connect(ctx, ConnConfig{}) + assert.NoError(t, err, clues.ToCore(err)) + + st.Config["common_corsoPassphrase"] = tmp } func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { @@ -273,17 +284,19 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { ctx, flush := tester.NewContext(t) defer flush() + acct := tconfig.NewM365Account(t) + // need to initialize the repository before we can test connecting to it. 
st := storeTD.NewPrefixedS3Storage(t) r, err := New( ctx, - account.Account{}, + acct, st, control.DefaultOptions(), NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) oldID := r.GetID() @@ -292,7 +305,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { require.NoError(t, err, clues.ToCore(err)) // now re-connect - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, oldID, r.GetID()) } @@ -315,7 +328,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackup() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + // service doesn't matter here, we just need a valid value. + err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService}) require.NoError(t, err, clues.ToCore(err)) userID := tconfig.M365UserID(t) @@ -344,7 +358,7 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() { "") require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) ro, err := r.NewRestore( @@ -374,7 +388,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackupAndDelete() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + // service doesn't matter here, we just need a valid value. + err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService}) require.NoError(t, err, clues.ToCore(err)) userID := tconfig.M365UserID(t) @@ -427,7 +442,7 @@ func (suite *RepositoryIntegrationSuite) TestNewMaintenance() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{}) @@ -496,11 +511,11 @@ func (suite *RepositoryIntegrationSuite) Test_Options() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err) assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory)) - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) assert.NoError(t, err) assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory)) }) diff --git a/src/pkg/repository/restores.go b/src/pkg/repository/restores.go new file mode 100644 index 000000000..6fe121e76 --- /dev/null +++ b/src/pkg/repository/restores.go @@ -0,0 +1,42 @@ +package repository + +import ( + "context" + + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/internal/operations" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/count" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/store" +) + +type Restorer interface { + NewRestore( + ctx context.Context, + backupID string, + sel selectors.Selector, + restoreCfg control.RestoreConfig, + ) (operations.RestoreOperation, error) +} + +// NewRestore generates a restoreOperation runner. 
+func (r repository) NewRestore(
+	ctx context.Context,
+	backupID string,
+	sel selectors.Selector,
+	restoreCfg control.RestoreConfig,
+) (operations.RestoreOperation, error) {
+	return operations.NewRestoreOperation(
+		ctx,
+		r.Opts,
+		r.dataLayer,
+		store.NewWrapper(r.modelStore),
+		r.Provider,
+		r.Account,
+		model.StableID(backupID),
+		sel,
+		restoreCfg,
+		r.Bus,
+		count.New())
+}
diff --git a/src/pkg/services/m365/api/access.go b/src/pkg/services/m365/api/access.go
new file mode 100644
index 000000000..956f9db05
--- /dev/null
+++ b/src/pkg/services/m365/api/access.go
@@ -0,0 +1,68 @@
+package api
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/alcionai/clues"
+
+	"github.com/alcionai/corso/src/internal/m365/graph"
+)
+
+// ---------------------------------------------------------------------------
+// controller
+// ---------------------------------------------------------------------------
+
+func (c Client) Access() Access {
+	return Access{c}
+}
+
+// Access is an interface-compliant provider of the client.
+type Access struct {
+	Client
+}
+
+// GetToken retrieves an M365 application auth token using client ID and secret
+// credentials. Corso doesn't normally need this token to function; it exists
+// primarily as a way to exercise the validity of those credentials without
+// needing any specific permissions.
+func (c Access) GetToken(
+	ctx context.Context,
+) error {
+	var (
+		//nolint:lll
+		// https://learn.microsoft.com/en-us/graph/connecting-external-content-connectors-api-postman#step-5-get-an-authentication-token
+		rawURL = fmt.Sprintf(
+			"https://login.microsoftonline.com/%s/oauth2/v2.0/token",
+			c.Credentials.AzureTenantID)
+		headers = map[string]string{
+			"Content-Type": "application/x-www-form-urlencoded",
+		}
+		body = strings.NewReader(fmt.Sprintf(
+			"client_id=%s"+
+				"&client_secret=%s"+
+				"&scope=https://graph.microsoft.com/.default"+
+				"&grant_type=client_credentials",
+			c.Credentials.AzureClientID,
+			c.Credentials.AzureClientSecret))
+	)
+
+	resp, err := c.Post(ctx, rawURL, headers, body)
+	if err != nil {
+		return graph.Stack(ctx, err)
+	}
+
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusBadRequest {
+		return clues.New("incorrect tenant or application parameters")
+	}
+
+	if resp.StatusCode/100 == 4 || resp.StatusCode/100 == 5 {
+		return clues.New("non-2xx response: " + resp.Status)
+	}
+
+	return nil
+}
diff --git a/src/pkg/services/m365/api/access_test.go b/src/pkg/services/m365/api/access_test.go
new file mode 100644
index 000000000..c903fcde1
--- /dev/null
+++ b/src/pkg/services/m365/api/access_test.go
@@ -0,0 +1,122 @@
+package api_test
+
+import (
+	"testing"
+
+	"github.com/alcionai/clues"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/internal/tester/tconfig"
+	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/services/m365/api"
+)
+
+type AccessAPIIntgSuite struct {
+	tester.Suite
+	its intgTesterSetup
+}
+
+func TestAccessAPIIntgSuite(t *testing.T) {
+	suite.Run(t, &AccessAPIIntgSuite{
+		Suite: tester.NewIntegrationSuite(
+			t,
+			[][]string{tconfig.M365AcctCredEnvs}),
+	})
+}
+
+func (suite *AccessAPIIntgSuite) SetupSuite() {
+	suite.its = newIntegrationTesterSetup(suite.T())
+}
+
+func (suite *AccessAPIIntgSuite) TestGetToken() {
+	tests := []struct {
+		name      string
+		creds     func() account.M365Config
+
expectErr require.ErrorAssertionFunc + }{ + { + name: "good", + creds: func() account.M365Config { return suite.its.ac.Credentials }, + expectErr: require.NoError, + }, + { + name: "bad tenant ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureTenantID = "ZIM" + + return creds + }, + expectErr: require.Error, + }, + { + name: "missing tenant ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureTenantID = "" + + return creds + }, + expectErr: require.Error, + }, + { + name: "bad client ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientID = "GIR" + + return creds + }, + expectErr: require.Error, + }, + { + name: "missing client ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientID = "" + + return creds + }, + expectErr: require.Error, + }, + { + name: "bad client secret", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientSecret = "MY TALLEST" + + return creds + }, + expectErr: require.Error, + }, + { + name: "missing client secret", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientSecret = "" + + return creds + }, + expectErr: require.Error, + }, + } + for _, test := range tests { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + ac, err := api.NewClient(suite.its.ac.Credentials, control.DefaultOptions()) + require.NoError(t, err, clues.ToCore(err)) + + ac.Credentials = test.creds() + + err = ac.Access().GetToken(ctx) + test.expectErr(t, err, clues.ToCore(err)) + }) + } +} diff --git a/src/pkg/services/m365/api/client.go b/src/pkg/services/m365/api/client.go index 64b00f3dd..a0d90eb46 100644 --- a/src/pkg/services/m365/api/client.go +++ b/src/pkg/services/m365/api/client.go @@ -2,6 +2,7 @@ package api import ( "context" + "io" "net/http" "github.com/alcionai/clues" @@ -24,7 +25,7 @@ import ( type Client struct { Credentials account.M365Config - // The Stable service is re-usable for any non-paged request. + // The Stable service is re-usable for any request. // This allows us to maintain performance across async requests. 
 	Stable graph.Servicer
 
@@ -119,6 +120,16 @@ func (c Client) Get(
 	return c.Requester.Request(ctx, http.MethodGet, url, nil, headers)
 }
 
+// Post performs an ad-hoc post request using its graph.Requester
+func (c Client) Post(
+	ctx context.Context,
+	url string,
+	headers map[string]string,
+	body io.Reader,
+) (*http.Response, error) {
+	return c.Requester.Request(ctx, http.MethodPost, url, body, headers)
+}
+
 // ---------------------------------------------------------------------------
 // per-call config
 // ---------------------------------------------------------------------------
@@ -126,3 +137,15 @@ func (c Client) Get(
 type CallConfig struct {
 	Expand []string
 }
+
+// ---------------------------------------------------------------------------
+// common interfaces
+// ---------------------------------------------------------------------------
+
+type GetByIDer[T any] interface {
+	GetByID(
+		ctx context.Context,
+		identifier string,
+		cc CallConfig,
+	) (T, error)
+}
diff --git a/src/pkg/services/m365/api/drive.go b/src/pkg/services/m365/api/drive.go
index e40d7497a..4c3b9b312 100644
--- a/src/pkg/services/m365/api/drive.go
+++ b/src/pkg/services/m365/api/drive.go
@@ -84,6 +84,26 @@ func (c Drives) GetRootFolder(
 	return root, nil
 }
 
+// TODO: pagination controller needed for completion.
+func (c Drives) GetFolderChildren(
+	ctx context.Context,
+	driveID, folderID string,
+) ([]models.DriveItemable, error) {
+	response, err := c.Stable.
+		Client().
+		Drives().
+		ByDriveId(driveID).
+		Items().
+		ByDriveItemId(folderID).
+		Children().
+		Get(ctx, nil)
+	if err != nil {
+		return nil, graph.Wrap(ctx, err, "getting folder children")
+	}
+
+	return response.GetValue(), nil
+}
+
 // ---------------------------------------------------------------------------
 // Items
 // ---------------------------------------------------------------------------
diff --git a/src/pkg/services/m365/api/groups.go b/src/pkg/services/m365/api/groups.go
index 73beb3d2b..b6223dbdd 100644
--- a/src/pkg/services/m365/api/groups.go
+++ b/src/pkg/services/m365/api/groups.go
@@ -3,6 +3,8 @@ package api
 import (
 	"context"
 	"fmt"
+	"net/url"
+	"strings"
 
 	"github.com/alcionai/clues"
 	msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
@@ -102,6 +104,7 @@ const filterGroupByDisplayNameQueryTmpl = "displayName eq '%s'"
 func (c Groups) GetByID(
 	ctx context.Context,
 	identifier string,
+	_ CallConfig, // matching standards
 ) (models.Groupable, error) {
 	service, err := c.Service()
 	if err != nil {
@@ -153,6 +156,88 @@ func (c Groups) GetByID(
 	return group, nil
 }
 
+// GetAllSites gets all the sites that belong to a group. This is
+// necessary because private and shared channels get their own
+// individual sites. All other channels use the group's root site.
+func (c Groups) GetAllSites(
+	ctx context.Context,
+	identifier string,
+	errs *fault.Bus,
+) ([]models.Siteable, error) {
+	el := errs.Local()
+
+	root, err := c.GetRootSite(ctx, identifier)
+	if err != nil {
+		return nil, clues.Wrap(err, "getting root site").
+			With("group_id", identifier)
+	}
+
+	sites := []models.Siteable{root}
+
+	channels, err := Channels(c).GetChannels(ctx, identifier)
+	if err != nil {
+		return nil, clues.Wrap(err, "getting channels")
+	}
+
+	service, err := c.Service()
+	if err != nil {
+		return nil, graph.Stack(ctx, err)
+	}
+
+	for _, ch := range channels {
+		if ptr.Val(ch.GetMembershipType()) == models.STANDARD_CHANNELMEMBERSHIPTYPE {
+			// Standard channels use root site
+			continue
+		}
+
+		ictx := clues.Add(
+			ctx,
+			"channel_id",
+			ptr.Val(ch.GetId()),
+			"channel_name",
+			clues.Hide(ptr.Val(ch.GetDisplayName())))
+
+		resp, err := service.
+			Client().
+			Teams().
+			ByTeamId(identifier).
+			Channels().
+			ByChannelId(ptr.Val(ch.GetId())).
+			FilesFolder().
+			Get(ictx, nil)
+		if err != nil {
+			return nil, clues.Wrap(err, "getting files folder for channel").
+				WithClues(ictx)
+		}
+
+		// The WebURL returned here points at the documents folder; trim the
+		// extra path segments to get the actual site's webURL, ex:
+		// https://example.sharepoint.com/sites//Shared%20Documents/
+		documentWebURL := ptr.Val(resp.GetWebUrl())
+
+		u, err := url.Parse(documentWebURL)
+		if err != nil {
+			return nil, clues.Wrap(err, "parsing document web url").
+				WithClues(ictx)
+		}
+
+		pathSegments := strings.Split(u.Path, "/") // pathSegments[0] == ""
+		siteWebURL := fmt.Sprintf("%s://%s/%s/%s", u.Scheme, u.Host, pathSegments[1], pathSegments[2])
+
+		ictx = clues.Add(ictx, "document_web_url", documentWebURL, "site_web_url", siteWebURL)
+
+		site, err := Sites(c).GetByID(ictx, siteWebURL, CallConfig{})
+		if err != nil {
+			el.AddRecoverable(ictx, clues.Wrap(err, "getting site"))
+			continue
+		}
+
+		sites = append(sites, site)
+	}
+
+	return sites, el.Failure()
+}
+
 func (c Groups) GetRootSite(
 	ctx context.Context,
 	identifier string,
@@ -170,7 +255,7 @@ func (c Groups) GetRootSite(
 		BySiteId("root").
 		Get(ctx, nil)
 	if err != nil {
-		return nil, clues.Wrap(err, "getting root site for group")
+		return nil, graph.Stack(ctx, err)
 	}
 
 	return resp, graph.Stack(ctx, err).OrNil()
@@ -234,9 +319,9 @@ func IsTeam(ctx context.Context, mg models.Groupable) bool {
 func (c Groups) GetIDAndName(
 	ctx context.Context,
 	groupID string,
-	_ CallConfig, // not currently supported
+	cc CallConfig,
 ) (string, string, error) {
-	s, err := c.GetByID(ctx, groupID)
+	s, err := c.GetByID(ctx, groupID, cc)
 	if err != nil {
 		return "", "", err
 	}
diff --git a/src/pkg/services/m365/api/groups_test.go b/src/pkg/services/m365/api/groups_test.go
index c00b64a13..213bb5d81 100644
--- a/src/pkg/services/m365/api/groups_test.go
+++ b/src/pkg/services/m365/api/groups_test.go
@@ -110,6 +110,33 @@ func (suite *GroupsIntgSuite) TestGetAll() {
 	require.NotZero(t, len(groups), "must have at least one group")
 }
 
+func (suite *GroupsIntgSuite) TestGetAllSites() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	channels, err := suite.its.ac.
+		Channels().GetChannels(ctx, suite.its.group.id)
+	require.NoError(t, err, "getting channels")
+	require.NotZero(t, len(channels), "must have at least one channel")
+
+	siteCount := 1
+
+	for _, c := range channels {
+		if ptr.Val(c.GetMembershipType()) != models.STANDARD_CHANNELMEMBERSHIPTYPE {
+			siteCount++
+		}
+	}
+
+	sites, err := suite.its.ac.
+		Groups().
+		GetAllSites(ctx, suite.its.group.id, fault.New(true))
+	require.NoError(t, err, clues.ToCore(err))
+	require.NotZero(t, len(sites), "must have at least one site")
+	require.Equal(t, siteCount, len(sites), "incorrect number of sites")
+}
+
 func (suite *GroupsIntgSuite) TestGroups_GetByID() {
 	t := suite.T()
 
@@ -121,7 +148,7 @@
 		groupsAPI = suite.its.ac.Groups()
 	)
 
-	grp, err := groupsAPI.GetByID(ctx, groupID)
+	grp, err := groupsAPI.GetByID(ctx, groupID, api.CallConfig{})
 	require.NoError(t, err, clues.ToCore(err))
 
 	table := []struct {
@@ -157,7 +184,7 @@
 			ctx, flush := tester.NewContext(t)
 			defer flush()
 
-			_, err := groupsAPI.GetByID(ctx, test.id)
+			_, err := groupsAPI.GetByID(ctx, test.id, api.CallConfig{})
 			test.expectErr(t, err, clues.ToCore(err))
 		})
 	}
diff --git a/src/pkg/services/m365/api/mail.go b/src/pkg/services/m365/api/mail.go
index 63c3684dd..59ad150ac 100644
--- a/src/pkg/services/m365/api/mail.go
+++ b/src/pkg/services/m365/api/mail.go
@@ -223,6 +223,26 @@ func (c Mail) PatchFolder(
 	return nil
 }
 
+// TODO: needs pager implementation for completion
+func (c Mail) GetContainerChildren(
+	ctx context.Context,
+	userID, containerID string,
+) ([]models.MailFolderable, error) {
+	resp, err := c.Stable.
+		Client().
+		Users().
+		ByUserId(userID).
+		MailFolders().
+		ByMailFolderId(containerID).
+		ChildFolders().
+		Get(ctx, nil)
+	if err != nil {
+		return nil, graph.Wrap(ctx, err, "getting container child folders")
+	}
+
+	return resp.GetValue(), nil
+}
+
 // ---------------------------------------------------------------------------
 // items
 // ---------------------------------------------------------------------------
diff --git a/src/pkg/services/m365/api/sites.go b/src/pkg/services/m365/api/sites.go
index 0865a4f47..813f1c1fa 100644
--- a/src/pkg/services/m365/api/sites.go
+++ b/src/pkg/services/m365/api/sites.go
@@ -142,6 +142,8 @@ func (c Sites) GetByID(
 		options.QueryParameters.Expand = cc.Expand
 	}
 
+	// NOTE: `/sites` sends `displayName` as the name, but
+	// `/sites/` sends the base of `webURL` as the name
 	resp, err = c.Stable.
 		Client().
 		Sites().
diff --git a/src/pkg/services/m365/groups.go b/src/pkg/services/m365/groups.go
index a32195c1c..5255620a7 100644
--- a/src/pkg/services/m365/groups.go
+++ b/src/pkg/services/m365/groups.go
@@ -28,6 +28,27 @@ type Group struct {
 	IsTeam bool
 }
 
+// GroupByID retrieves a specific group.
+func GroupByID(
+	ctx context.Context,
+	acct account.Account,
+	id string,
+) (*Group, error) {
+	ac, err := makeAC(ctx, acct, path.GroupsService)
+	if err != nil {
+		return nil, clues.Stack(err).WithClues(ctx)
+	}
+
+	cc := api.CallConfig{}
+
+	g, err := ac.Groups().GetByID(ctx, id, cc)
+	if err != nil {
+		return nil, clues.Stack(err)
+	}
+
+	return parseGroup(ctx, g)
+}
+
 // GroupsCompat returns a list of groups in the specified M365 tenant.
func GroupsCompat(ctx context.Context, acct account.Account) ([]*Group, error) { errs := fault.New(true) diff --git a/src/pkg/services/m365/groups_test.go b/src/pkg/services/m365/groups_test.go index 7c2cd4183..02091d42b 100644 --- a/src/pkg/services/m365/groups_test.go +++ b/src/pkg/services/m365/groups_test.go @@ -41,6 +41,24 @@ func (suite *GroupsIntgSuite) SetupSuite() { suite.acct = tconfig.NewM365Account(t) } +func (suite *GroupsIntgSuite) TestGroupByID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) + + gid := tconfig.M365TeamID(t) + + group, err := m365.GroupByID(ctx, suite.acct, gid) + require.NoError(t, err, clues.ToCore(err)) + require.NotNil(t, group) + + assert.Equal(t, gid, group.ID, "must match expected id") + assert.NotEmpty(t, group.DisplayName) +} + func (suite *GroupsIntgSuite) TestGroups() { t := suite.T() diff --git a/src/pkg/storage/common_test.go b/src/pkg/storage/common_test.go index 02668e611..e8b4a89ba 100644 --- a/src/pkg/storage/common_test.go +++ b/src/pkg/storage/common_test.go @@ -7,16 +7,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/credentials" "github.com/alcionai/corso/src/pkg/storage" ) -type CommonCfgSuite struct { - suite.Suite +type CommonCfgUnitSuite struct { + tester.Suite } -func TestCommonCfgSuite(t *testing.T) { - suite.Run(t, new(CommonCfgSuite)) +func TestCommonCfgUnitSuite(t *testing.T) { + suite.Run(t, &CommonCfgUnitSuite{Suite: tester.NewUnitSuite(t)}) } var goodCommonConfig = storage.CommonConfig{ @@ -25,7 +26,7 @@ var goodCommonConfig = storage.CommonConfig{ }, } -func (suite *CommonCfgSuite) TestCommonConfig_Config() { +func (suite *CommonCfgUnitSuite) TestCommonConfig_Config() { cfg := goodCommonConfig c, err := cfg.StringConfig() assert.NoError(suite.T(), err, clues.ToCore(err)) @@ -43,7 +44,7 @@ func (suite *CommonCfgSuite) TestCommonConfig_Config() { } } -func (suite *CommonCfgSuite) TestStorage_CommonConfig() { +func (suite *CommonCfgUnitSuite) TestStorage_CommonConfig() { t := suite.T() in := goodCommonConfig @@ -55,7 +56,7 @@ func (suite *CommonCfgSuite) TestStorage_CommonConfig() { assert.Equal(t, in.CorsoPassphrase, out.CorsoPassphrase) } -func (suite *CommonCfgSuite) TestStorage_CommonConfig_InvalidCases() { +func (suite *CommonCfgUnitSuite) TestStorage_CommonConfig_InvalidCases() { // missing required properties table := []struct { name string diff --git a/src/pkg/storage/filesystem.go b/src/pkg/storage/filesystem.go index ca4cfe098..08dacc62c 100644 --- a/src/pkg/storage/filesystem.go +++ b/src/pkg/storage/filesystem.go @@ -20,6 +20,10 @@ type FilesystemConfig struct { Path string } +func (s Storage) ToFilesystemConfig() (*FilesystemConfig, error) { + return buildFilesystemConfigFromMap(s.Config) +} + func buildFilesystemConfigFromMap(config map[string]string) (*FilesystemConfig, error) { c := &FilesystemConfig{} @@ -69,7 +73,7 @@ func (c *FilesystemConfig) ApplyConfigOverrides( if matchFromConfig { providerType := cast.ToString(g.Get(StorageProviderTypeKey)) if providerType != ProviderFilesystem.String() { - return clues.New("unsupported storage provider in config file: " + providerType) + return clues.New("unsupported storage provider in config file: [" + providerType + "]") } // This is matching override values from config file. 
diff --git a/src/pkg/storage/s3.go b/src/pkg/storage/s3.go index c689e77cd..7f2e8688f 100644 --- a/src/pkg/storage/s3.go +++ b/src/pkg/storage/s3.go @@ -62,6 +62,28 @@ var s3constToTomlKeyMap = map[string]string{ StorageProviderTypeKey: StorageProviderTypeKey, } +func (s Storage) ToS3Config() (*S3Config, error) { + return buildS3ConfigFromMap(s.Config) +} + +func buildS3ConfigFromMap(config map[string]string) (*S3Config, error) { + c := &S3Config{} + + if len(config) > 0 { + c.AccessKey = orEmptyString(config[keyS3AccessKey]) + c.SecretKey = orEmptyString(config[keyS3SecretKey]) + c.SessionToken = orEmptyString(config[keyS3SessionToken]) + + c.Bucket = orEmptyString(config[keyS3Bucket]) + c.Endpoint = orEmptyString(config[keyS3Endpoint]) + c.Prefix = orEmptyString(config[keyS3Prefix]) + c.DoNotUseTLS = str.ParseBool(config[keyS3DoNotUseTLS]) + c.DoNotVerifyTLS = str.ParseBool(config[keyS3DoNotVerifyTLS]) + } + + return c, c.validate() +} + func (c *S3Config) normalize() S3Config { return S3Config{ Bucket: common.NormalizeBucket(c.Bucket), @@ -91,24 +113,6 @@ func (c *S3Config) StringConfig() (map[string]string, error) { return cfg, cn.validate() } -func buildS3ConfigFromMap(config map[string]string) (*S3Config, error) { - c := &S3Config{} - - if len(config) > 0 { - c.AccessKey = orEmptyString(config[keyS3AccessKey]) - c.SecretKey = orEmptyString(config[keyS3SecretKey]) - c.SessionToken = orEmptyString(config[keyS3SessionToken]) - - c.Bucket = orEmptyString(config[keyS3Bucket]) - c.Endpoint = orEmptyString(config[keyS3Endpoint]) - c.Prefix = orEmptyString(config[keyS3Prefix]) - c.DoNotUseTLS = str.ParseBool(config[keyS3DoNotUseTLS]) - c.DoNotVerifyTLS = str.ParseBool(config[keyS3DoNotVerifyTLS]) - } - - return c, c.validate() -} - func (c S3Config) validate() error { check := map[string]string{ Bucket: c.Bucket, @@ -169,11 +173,11 @@ func (c *S3Config) ApplyConfigOverrides( if matchFromConfig { providerType := cast.ToString(kvg.Get(StorageProviderTypeKey)) if providerType != ProviderS3.String() { - return clues.New("unsupported storage provider: " + providerType) + return clues.New("unsupported storage provider: [" + providerType + "]") } if err := mustMatchConfig(kvg, s3constToTomlKeyMap, s3Overrides(overrides)); err != nil { - return clues.Wrap(err, "verifying s3 configs in corso config file") + return clues.Stack(err) } } } diff --git a/src/pkg/storage/s3_test.go b/src/pkg/storage/s3_test.go index 2a4b239f9..1e3a2e0ba 100644 --- a/src/pkg/storage/s3_test.go +++ b/src/pkg/storage/s3_test.go @@ -8,15 +8,16 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/credentials" ) -type S3CfgSuite struct { - suite.Suite +type S3CfgUnitSuite struct { + tester.Suite } -func TestS3CfgSuite(t *testing.T) { - suite.Run(t, new(S3CfgSuite)) +func TestS3CfgUnitSuite(t *testing.T) { + suite.Run(t, &S3CfgUnitSuite{Suite: tester.NewUnitSuite(t)}) } var ( @@ -41,7 +42,7 @@ var ( } ) -func (suite *S3CfgSuite) TestS3Config_Config() { +func (suite *S3CfgUnitSuite) TestS3Config_Config() { s3 := goodS3Config c, err := s3.StringConfig() @@ -60,16 +61,16 @@ func (suite *S3CfgSuite) TestS3Config_Config() { } } -func (suite *S3CfgSuite) TestStorage_S3Config() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config() { t := suite.T() - in := goodS3Config + s, err := NewStorage(ProviderS3, &in) assert.NoError(t, err, clues.ToCore(err)) - sc, err := s.StorageConfig() + + out, err := s.ToS3Config() 
assert.NoError(t, err, clues.ToCore(err)) - out := sc.(*S3Config) assert.Equal(t, in.Bucket, out.Bucket) assert.Equal(t, in.Endpoint, out.Endpoint) assert.Equal(t, in.Prefix, out.Prefix) @@ -84,7 +85,7 @@ func makeTestS3Cfg(bkt, end, pre, access, secret, session string) S3Config { } } -func (suite *S3CfgSuite) TestStorage_S3Config_invalidCases() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config_invalidCases() { // missing required properties table := []struct { name string @@ -118,13 +119,14 @@ func (suite *S3CfgSuite) TestStorage_S3Config_invalidCases() { st, err := NewStorage(ProviderUnknown, &goodS3Config) assert.NoError(t, err, clues.ToCore(err)) test.amend(st) - _, err = st.StorageConfig() - assert.Error(t, err) + + _, err = st.ToS3Config() + assert.Error(t, err, clues.ToCore(err)) }) } } -func (suite *S3CfgSuite) TestStorage_S3Config_StringConfig() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config_StringConfig() { table := []struct { name string input S3Config @@ -178,7 +180,7 @@ func (suite *S3CfgSuite) TestStorage_S3Config_StringConfig() { } } -func (suite *S3CfgSuite) TestStorage_S3Config_Normalize() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config_Normalize() { const ( prefixedBkt = "s3://bkt" normalBkt = "bkt" diff --git a/src/pkg/storage/storage.go b/src/pkg/storage/storage.go index 11b8863a1..c695ea992 100644 --- a/src/pkg/storage/storage.go +++ b/src/pkg/storage/storage.go @@ -9,6 +9,8 @@ import ( "github.com/alcionai/corso/src/internal/common" ) +var ErrVerifyingConfigStorage = clues.New("verifying configs in corso config file") + type ProviderType int //go:generate stringer -type=ProviderType -linecomment @@ -102,7 +104,7 @@ func (s Storage) StorageConfig() (Configurer, error) { return buildFilesystemConfigFromMap(s.Config) } - return nil, clues.New("unsupported storage provider: " + s.Provider.String()) + return nil, clues.New("unsupported storage provider: [" + s.Provider.String() + "]") } func NewStorageConfig(provider ProviderType) (Configurer, error) { @@ -113,7 +115,7 @@ func NewStorageConfig(provider ProviderType) (Configurer, error) { return &FilesystemConfig{}, nil } - return nil, clues.New("unsupported storage provider: " + provider.String()) + return nil, clues.New("unsupported storage provider: [" + provider.String() + "]") } type Getter interface { @@ -167,7 +169,8 @@ func mustMatchConfig( vv := cast.ToString(g.Get(tomlK)) if v != vv { - return clues.New("value of " + k + " (" + v + ") does not match corso configuration value (" + vv + ")") + err := clues.New("value of " + k + " (" + v + ") does not match corso configuration value (" + vv + ")") + return clues.Stack(ErrVerifyingConfigStorage, err) } } diff --git a/src/pkg/storage/storage_test.go b/src/pkg/storage/storage_test.go index 0d2cfbec6..095ea363c 100644 --- a/src/pkg/storage/storage_test.go +++ b/src/pkg/storage/storage_test.go @@ -6,6 +6,8 @@ import ( "github.com/alcionai/clues" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" ) type testConfig struct { @@ -17,15 +19,15 @@ func (c testConfig) StringConfig() (map[string]string, error) { return map[string]string{"expect": c.expect}, c.err } -type StorageSuite struct { - suite.Suite +type StorageUnitSuite struct { + tester.Suite } -func TestStorageSuite(t *testing.T) { - suite.Run(t, new(StorageSuite)) +func TestStorageUnitSuite(t *testing.T) { + suite.Run(t, &StorageUnitSuite{Suite: tester.NewUnitSuite(t)}) } -func (suite *StorageSuite) TestNewStorage() { +func (suite 
*StorageUnitSuite) TestNewStorage() {
 	table := []struct {
 		name string
 		p    ProviderType
diff --git a/src/pkg/storage/testdata/storage.go b/src/pkg/storage/testdata/storage.go
index 227a959bb..0653ee0fd 100644
--- a/src/pkg/storage/testdata/storage.go
+++ b/src/pkg/storage/testdata/storage.go
@@ -68,6 +68,9 @@ func NewFilesystemStorage(t tester.TestT) storage.Storage {
 		},
 		storage.CommonConfig{
 			Corso: GetAndInsertCorso(""),
+			// Use separate kopia configs for each instance. Place in a new folder to
+			// avoid mixing data.
+			KopiaCfgDir: t.TempDir(),
 		})
 	require.NoError(t, err, "creating storage", clues.ToCore(err))
 
diff --git a/website/docs/setup/maintenance.md b/website/docs/setup/maintenance.md
index a2ec6b1ed..51563d492 100644
--- a/website/docs/setup/maintenance.md
+++ b/website/docs/setup/maintenance.md
@@ -43,3 +43,15 @@ may not result in a reduction of objects in the storage service Corso is backing
 Deletion of old objects in the storage service depends on both wall-clock time
 and running maintenance. Later maintenance runs on the repository will remove
 the data.
+
+## Maintenance guidelines
+
+For the best experience, run metadata maintenance every 20–30 backups and
+complete maintenance every 1–2 weeks, depending on how many backups are
+deleted from the repo. The more backups you delete, the more often
+complete maintenance should run so that unneeded blobs in the storage
+service also get deleted.
+
+Running maintenance less often than recommended won't impact the
+correctness of the data in the repo, but it can result in decreased
+performance.
diff --git a/website/package-lock.json b/website/package-lock.json
index 267aa055c..f4ff67600 100644
--- a/website/package-lock.json
+++ b/website/package-lock.json
@@ -33,7 +33,7 @@
         "@docusaurus/module-type-aliases": "2.4.3",
         "@iconify/react": "^4.1.1",
         "autoprefixer": "^10.4.16",
-        "postcss": "^8.4.30",
+        "postcss": "^8.4.31",
         "tailwindcss": "^3.3.3"
       }
     },
@@ -10743,9 +10743,9 @@
      }
    },
    "node_modules/postcss": {
-      "version": "8.4.30",
-      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.30.tgz",
-      "integrity": "sha512-7ZEao1g4kd68l97aWG/etQKPKq07us0ieSZ2TnFDk11i0ZfDW2AwKHYU8qv4MZKqN2fdBfg+7q0ES06UA73C1g==",
+      "version": "8.4.31",
+      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
+      "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
      "funding": [
        {
          "type": "opencollective",
@@ -22738,9 +22738,9 @@
      "integrity": "sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ=="
    },
    "postcss": {
-      "version": "8.4.30",
-      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.30.tgz",
-      "integrity": "sha512-7ZEao1g4kd68l97aWG/etQKPKq07us0ieSZ2TnFDk11i0ZfDW2AwKHYU8qv4MZKqN2fdBfg+7q0ES06UA73C1g==",
+      "version": "8.4.31",
+      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
+      "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
      "requires": {
        "nanoid": "^3.3.6",
        "picocolors": "^1.0.0",
diff --git a/website/package.json b/website/package.json
index 08ddd9305..ab903d36d 100644
--- a/website/package.json
+++ b/website/package.json
@@ -39,7 +39,7 @@
    "@docusaurus/module-type-aliases": "2.4.3",
    "@iconify/react": "^4.1.1",
    "autoprefixer": "^10.4.16",
-    "postcss": "^8.4.30",
+    "postcss": "^8.4.31",
    "tailwindcss": "^3.3.3"
  },
  "browserslist": {
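
The new `Access().GetToken` helper added in this change gives callers a cheap way to validate M365 credentials before any backup work begins, since it needs no granted Graph permissions. Below is a minimal sketch of how it might be used outside the test suite. The `main` scaffolding and the placeholder credential values are assumptions; `api.NewClient`, `control.DefaultOptions`, and `Access().GetToken` match the calls exercised by `access_test.go` in this change.

```go
package main

import (
	"context"
	"log"

	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func main() {
	ctx := context.Background()

	// Placeholder values; real credentials come from an Azure app registration.
	var creds account.M365Config
	creds.AzureTenantID = "<tenant-id>"
	creds.AzureClientID = "<client-id>"
	creds.AzureClientSecret = "<client-secret>"

	ac, err := api.NewClient(creds, control.DefaultOptions())
	if err != nil {
		log.Fatal(err)
	}

	// A nil error means the tenant/client/secret combination was able to
	// mint an auth token against the login.microsoftonline.com endpoint.
	if err := ac.Access().GetToken(ctx); err != nil {
		log.Fatalf("m365 credentials failed validation: %v", err)
	}

	log.Println("m365 credentials look valid")
}
```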
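The maintenance guidance added to `website/docs/setup/maintenance.md` can also be driven programmatically through the repository API. The sketch below is a non-authoritative illustration under stated assumptions: `New`, `Connect(ctx, ConnConfig{})`, `Close`, and `NewMaintenance(ctx, ctrlRepo.Maintenance{})` all appear in this change's tests, but the exported `repository.NewRepoID` and `repository.ConnConfig` spellings from outside the package, the `Run` method on the returned operation, and the `ctrlRepo.Maintenance` fields that select metadata vs. complete maintenance are assumptions.

```go
package main

import (
	"context"

	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
	"github.com/alcionai/corso/src/pkg/repository"
	"github.com/alcionai/corso/src/pkg/storage"
)

// runMaintenance connects to an existing repo and runs one maintenance pass.
func runMaintenance(ctx context.Context, acct account.Account, st storage.Storage) error {
	// Mirrors the test setup in this change; NewRepoID is assumed exported.
	r, err := repository.New(ctx, acct, st, control.DefaultOptions(), repository.NewRepoID)
	if err != nil {
		return err
	}

	// Connect, rather than Initialize, since the repo already exists.
	if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
		return err
	}

	defer func() {
		_ = r.Close(ctx)
	}()

	// An empty config mirrors TestNewMaintenance; fields on
	// ctrlRepo.Maintenance are assumed to pick metadata vs. complete runs.
	mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{})
	if err != nil {
		return err
	}

	// Run is assumed to follow the same pattern as other corso operations.
	return mo.Run(ctx)
}
```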