diff --git a/.github/actions/backup-restore-test/action.yml b/.github/actions/backup-restore-test/action.yml index 4e31ad836..2d161af63 100644 --- a/.github/actions/backup-restore-test/action.yml +++ b/.github/actions/backup-restore-test/action.yml @@ -24,6 +24,10 @@ inputs: log-dir: description: Folder to store test log files required: true + on-collision: + description: Value for the --collisions flag + required: false + default: "replace" outputs: backup-id: @@ -57,6 +61,7 @@ runs: ./corso restore '${{ inputs.service }}' \ --no-stats \ --hide-progress \ + --collisions ${{ inputs.on-collision }} \ ${{ inputs.restore-args }} \ --backup '${{ steps.backup.outputs.result }}' \ 2>&1 | diff --git a/.github/actions/purge-m365-data/action.yml b/.github/actions/purge-m365-data/action.yml index dd67f2326..cf013a054 100644 --- a/.github/actions/purge-m365-data/action.yml +++ b/.github/actions/purge-m365-data/action.yml @@ -89,4 +89,4 @@ runs: env: M365_TENANT_ADMIN_USER: ${{ inputs.m365-admin-user }} M365_TENANT_ADMIN_PASSWORD: ${{ inputs.m365-admin-password }} - run: ./onedrivePurge.ps1 -Site ${{ inputs.site }} -LibraryNameList "${{ inputs.libraries }}".split(",") -FolderPrefixPurgeList ${{ inputs.folder-prefix }} -LibraryPrefixDeleteList ${{ inputs.library-prefix }} -PurgeBeforeTimestamp ${{ inputs.older-than }} + run: ./onedrivePurge.ps1 -Site ${{ inputs.site }} -LibraryNameList "${{ inputs.libraries }}".split(",") -FolderPrefixPurgeList ${{ inputs.folder-prefix }} -LibraryPrefixDeleteList ${{ inputs.library-prefix && inputs.library-prefix || '[]' }} -PurgeBeforeTimestamp ${{ inputs.older-than }} diff --git a/.github/actions/slack-message/action.yml b/.github/actions/slack-message/action.yml new file mode 100644 index 000000000..1b72d9bee --- /dev/null +++ b/.github/actions/slack-message/action.yml @@ -0,0 +1,57 @@ +name: Send a message to slack + +inputs: + msg: + description: The slack message text + slack_url: + description: passthrough for secrets.SLACK_WEBHOOK_URL + +runs: + using: composite + steps: + - uses: actions/checkout@v3 + + - name: set github ref + shell: bash + run: | + echo "github_reference=${{ github.ref }}" >> $GITHUB_ENV + + - name: trim github ref + shell: bash + run: | + echo "trimmed_ref=${github_reference#refs/}" >> $GITHUB_ENV + + - name: build urls + shell: bash + run: | + echo "logurl=$(printf '' ${{ github.run_id }})" >> $GITHUB_ENV + echo "commiturl=$(printf '' ${{ github.sha }})" >> $GITHUB_ENV + echo "refurl=$(printf '' ${{ env.trimmed_ref }})" >> $GITHUB_ENV + + - name: use url or blank val + shell: bash + run: | + echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV + echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV + echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV + echo "REF=${{ env.trimmed_ref && env.refurl || '-' }}" >> $GITHUB_ENV + + - id: slack-message + uses: slackapi/slack-github-action@v1.24.0 + env: + SLACK_WEBHOOK_URL: ${{ inputs.slack_url }} + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + with: + payload: | + { + "text": "${{ inputs.msg }} :: ${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ inputs.msg }} :: ${{ env.JOB }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}" + } + } + ] + } diff --git a/.github/workflows/ci_test_cleanup.yml b/.github/workflows/ci_test_cleanup.yml index 11ba94097..3687c0e0c 100644 --- a/.github/workflows/ci_test_cleanup.yml +++ b/.github/workflows/ci_test_cleanup.yml @@ -18,9 +18,7 @@ jobs: - uses: 
actions/checkout@v3 # sets the maximum time to now-30m. - # CI test have a 10 minute timeout. - # At 20 minutes ago, we should be safe from conflicts. - # The additional 10 minutes is just to be good citizens. + # CI tests have a 20 minute timeout. - name: Set purge boundary run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV @@ -36,6 +34,13 @@ jobs: m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }} m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }} + - name: Notify failure in slack + if: failure() + uses: ./.github/actions/slack-message + with: + msg: "[FAILED] ${{ vars[matrix.user] }} CI Cleanup" + slack_url: ${{ secrets.SLACK_WEBHOOK_URL }} + Test-Site-Data-Cleanup: environment: Testing runs-on: ubuntu-latest @@ -48,9 +53,7 @@ jobs: - uses: actions/checkout@v3 # sets the maximum time to now-30m. - # CI test have a 10 minute timeout. - # At 20 minutes ago, we should be safe from conflicts. - # The additional 10 minutes is just to be good citizens. + # CI tests have a 20 minute timeout. - name: Set purge boundary run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV @@ -67,3 +70,10 @@ jobs: azure-tenant-id: ${{ secrets.TENANT_ID }} m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }} m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }} + + - name: Notify failure in slack + if: failure() + uses: ./.github/actions/slack-message + with: + msg: "[FAILED] ${{ vars[matrix.site] }} CI Cleanup" + slack_url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/longevity_test.yml b/.github/workflows/longevity_test.yml index 02b956be1..8d294107a 100644 --- a/.github/workflows/longevity_test.yml +++ b/.github/workflows/longevity_test.yml @@ -277,33 +277,9 @@ jobs: if-no-files-found: error retention-days: 14 - - name: SHA info - id: sha-info + - name: Notify failure in slack if: failure() - run: | - echo ${GITHUB_REF#refs/heads/}-${GITHUB_SHA} - echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT - echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT - echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT - - - name: Send Github Action failure to Slack - id: slack-notification - if: failure() - uses: slackapi/slack-github-action@v1.24.0 + uses: ./.github/actions/slack-message with: - payload: | - { - "text": "Longevity test failure - build: ${{ job.status }} - SHA: ${{ steps.sha-info.outputs.SHA }}", - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "[FAILED] Longevity Checks :: <${{ steps.sha-info.outputs.RUN_URL }}|[Logs]> <${{ steps.sha-info.outputs.COMMIT_URL }}|[Base]>\nCommit: <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>" - } - } - ] - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + msg: "[FAILED] Longevity Test" + slack_url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/nightly_test.yml b/.github/workflows/nightly_test.yml index 2ea556099..29b69ad20 100644 --- a/.github/workflows/nightly_test.yml +++ b/.github/workflows/nightly_test.yml @@ -94,6 +94,7 @@ jobs: CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }} CORSO_LOG_FILE: ${{ github.workspace }}/testlog/run-nightly.log LOG_GRAPH_REQUESTS: true + S3_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }} run: | set -euo pipefail go test \ @@ -119,33 +120,9 @@ 
jobs: if-no-files-found: error retention-days: 14 - - name: SHA info - id: sha-info + - name: Notify failure in slack if: failure() - run: | - echo ${GITHUB_REF#refs/heads/}-${GITHUB_SHA} - echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT - echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT - echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT - - - name: Send Github Action failure to Slack - id: slack-notification - if: failure() - uses: slackapi/slack-github-action@v1.24.0 + uses: ./.github/actions/slack-message with: - payload: | - { - "text": "Nightly test failure - build: ${{ job.status }} - SHA: ${{ steps.sha-info.outputs.SHA }}", - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "[FAILED] Nightly Checks :: <${{ steps.sha-info.outputs.RUN_URL }}|[Logs]> <${{ steps.sha-info.outputs.COMMIT_URL }}|[Base]>\nCommit: <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>" - } - } - ] - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + msg: "[FAILED] Nightly Checks" + slack_url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml index 9e210778d..292c358dd 100644 --- a/.github/workflows/sanity-test.yaml +++ b/.github/workflows/sanity-test.yaml @@ -333,33 +333,9 @@ jobs: if-no-files-found: error retention-days: 14 - - name: SHA info - id: sha-info + - name: Notify failure in slack if: failure() - run: | - echo ${GITHUB_REF#refs/heads/}-${GITHUB_SHA} - echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT - echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT - echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT - - - name: Send Github Action failure to Slack - id: slack-notification - if: failure() - uses: slackapi/slack-github-action@v1.24.0 + uses: ./.github/actions/slack-message with: - payload: | - { - "text": "Sanity test failure - build: ${{ job.status }} - SHA: ${{ steps.sha-info.outputs.SHA }}", - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "[FAILED] Sanity Checks :: <${{ steps.sha-info.outputs.RUN_URL }}|[Logs]> <${{ github.event.pull_request.html_url || github.event.head_commit.url }}|[Base]>\nCommit: <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>" - } - } - ] - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK \ No newline at end of file + msg: "[FAILED] Sanity Tests" + slack_url: ${{ secrets.SLACK_WEBHOOK_URL }} \ No newline at end of file diff --git a/src/.golangci.yml b/src/.golangci.yml index 06ccaa3dd..da54f6217 100644 --- a/src/.golangci.yml +++ b/src/.golangci.yml @@ -3,14 +3,20 @@ run: linters: enable: + - errcheck + - forbidigo - gci - gofmt - gofumpt - - errcheck - - forbidigo + - gosimple + - govet + - ineffassign - lll + - loggercheck - misspell - revive + - unused + - usestdlibvars - wsl disable: diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 99bb0ff78..0f11bd6bd 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -94,6 +94,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command { flags.AddDisableDeltaFlag(c) flags.AddEnableImmutableIDFlag(c) 
flags.AddDisableConcurrencyLimiterFlag(c) + flags.AddDeltaPageSizeFlag(c) case listCommand: c, fs = utils.AddCommand(cmd, exchangeListCmd()) @@ -175,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { sel := exchangeBackupCreateSelectors(flags.UserFV, flags.CategoryDataFV) - ins, err := utils.UsersMap(ctx, *acct, fault.New(true)) + ins, err := utils.UsersMap(ctx, *acct, utils.Control(), fault.New(true)) if err != nil { return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users")) } diff --git a/src/cli/backup/exchange_test.go b/src/cli/backup/exchange_test.go index 6bd078797..d260ca290 100644 --- a/src/cli/backup/exchange_test.go +++ b/src/cli/backup/exchange_test.go @@ -37,11 +37,11 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { expectRunE func(*cobra.Command, []string) error }{ { - "create exchange", - createCommand, - expectUse + " " + exchangeServiceCommandCreateUseSuffix, - exchangeCreateCmd().Short, - []string{ + name: "create exchange", + use: createCommand, + expectUse: expectUse + " " + exchangeServiceCommandCreateUseSuffix, + expectShort: exchangeCreateCmd().Short, + flags: []string{ flags.UserFN, flags.CategoryDataFN, flags.DisableIncrementalsFN, @@ -50,28 +50,29 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { flags.FetchParallelismFN, flags.SkipReduceFN, flags.NoStatsFN, + flags.DeltaPageSizeFN, }, - createExchangeCmd, + expectRunE: createExchangeCmd, }, { - "list exchange", - listCommand, - expectUse, - exchangeListCmd().Short, - []string{ + name: "list exchange", + use: listCommand, + expectUse: expectUse, + expectShort: exchangeListCmd().Short, + flags: []string{ flags.BackupFN, flags.FailedItemsFN, flags.SkippedItemsFN, flags.RecoveredErrorsFN, }, - listExchangeCmd, + expectRunE: listExchangeCmd, }, { - "details exchange", - detailsCommand, - expectUse + " " + exchangeServiceCommandDetailsUseSuffix, - exchangeDetailsCmd().Short, - []string{ + name: "details exchange", + use: detailsCommand, + expectUse: expectUse + " " + exchangeServiceCommandDetailsUseSuffix, + expectShort: exchangeDetailsCmd().Short, + flags: []string{ flags.BackupFN, flags.ContactFN, flags.ContactFolderFN, @@ -90,7 +91,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { flags.EventStartsBeforeFN, flags.EventSubjectFN, }, - detailsExchangeCmd, + expectRunE: detailsExchangeCmd, }, { "delete exchange", diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 62ce242d4..b9d94fc41 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -157,7 +157,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { sel := oneDriveBackupCreateSelectors(flags.UserFV) - ins, err := utils.UsersMap(ctx, *acct, fault.New(true)) + ins, err := utils.UsersMap(ctx, *acct, utils.Control(), fault.New(true)) if err != nil { return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users")) } diff --git a/src/cli/flags/options.go b/src/cli/flags/options.go index 046d3c8d7..81a893f93 100644 --- a/src/cli/flags/options.go +++ b/src/cli/flags/options.go @@ -5,6 +5,7 @@ import ( ) const ( + DeltaPageSizeFN = "delta-page-size" DisableConcurrencyLimiterFN = "disable-concurrency-limiter" DisableDeltaFN = "disable-delta" DisableIncrementalsFN = "disable-incrementals" @@ -21,6 +22,7 @@ const ( ) var ( + DeltaPageSizeFV int DisableConcurrencyLimiterFV bool DisableDeltaFV bool DisableIncrementalsFV bool @@ -72,6 +74,18 @@ func AddSkipReduceFlag(cmd *cobra.Command) { cobra.CheckErr(fs.MarkHidden(SkipReduceFN)) } +// 
AddDeltaPageSizeFlag adds a hidden flag that allows callers to reduce delta +// query page sizes below 500. +func AddDeltaPageSizeFlag(cmd *cobra.Command) { + fs := cmd.Flags() + fs.IntVar( + &DeltaPageSizeFV, + DeltaPageSizeFN, + 500, + "Control quantity of items returned in paged queries. Valid range is [1-500]. Default: 500") + cobra.CheckErr(fs.MarkHidden(DeltaPageSizeFN)) +} + // AddFetchParallelismFlag adds a hidden flag that allows callers to reduce call // paralellism (ie, the corso worker pool size) from 4 to as low as 1. func AddFetchParallelismFlag(cmd *cobra.Command) { diff --git a/src/cli/utils/options.go b/src/cli/utils/options.go index 0cc44c839..7f9176a90 100644 --- a/src/cli/utils/options.go +++ b/src/cli/utils/options.go @@ -14,6 +14,12 @@ func Control() control.Options { opt.FailureHandling = control.FailFast } + dps := int32(flags.DeltaPageSizeFV) + if dps > 500 || dps < 1 { + dps = 500 + } + + opt.DeltaPageSize = dps opt.DisableMetrics = flags.NoStatsFV opt.RestorePermissions = flags.RestorePermissionsFV opt.SkipReduce = flags.SkipReduceFV diff --git a/src/cli/utils/options_test.go b/src/cli/utils/options_test.go index 746558aa1..1a8f7ddcd 100644 --- a/src/cli/utils/options_test.go +++ b/src/cli/utils/options_test.go @@ -35,6 +35,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() { assert.True(t, flags.SkipReduceFV, flags.SkipReduceFN) assert.Equal(t, 2, flags.FetchParallelismFV, flags.FetchParallelismFN) assert.True(t, flags.DisableConcurrencyLimiterFV, flags.DisableConcurrencyLimiterFN) + assert.Equal(t, 499, flags.DeltaPageSizeFV, flags.DeltaPageSizeFN) }, } @@ -48,6 +49,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() { flags.AddSkipReduceFlag(cmd) flags.AddFetchParallelismFlag(cmd) flags.AddDisableConcurrencyLimiterFlag(cmd) + flags.AddDeltaPageSizeFlag(cmd) // Test arg parsing for few args cmd.SetArgs([]string{ @@ -60,6 +62,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() { "--" + flags.SkipReduceFN, "--" + flags.FetchParallelismFN, "2", "--" + flags.DisableConcurrencyLimiterFN, + "--" + flags.DeltaPageSizeFN, "499", }) err := cmd.Execute() diff --git a/src/cli/utils/testdata/flags.go b/src/cli/utils/testdata/flags.go index f97529b57..d29198072 100644 --- a/src/cli/utils/testdata/flags.go +++ b/src/cli/utils/testdata/flags.go @@ -48,6 +48,8 @@ var ( Destination = "destination" RestorePermissions = true + DeltaPageSize = "deltaPageSize" + AzureClientID = "testAzureClientId" AzureTenantID = "testAzureTenantId" AzureClientSecret = "testAzureClientSecret" diff --git a/src/cli/utils/users.go b/src/cli/utils/users.go index 610f0e2c6..affa520fd 100644 --- a/src/cli/utils/users.go +++ b/src/cli/utils/users.go @@ -7,6 +7,7 @@ import ( "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -15,9 +16,10 @@ import ( func UsersMap( ctx context.Context, acct account.Account, + co control.Options, errs *fault.Bus, ) (idname.Cacher, error) { - au, err := makeUserAPI(acct) + au, err := makeUserAPI(acct, co) if err != nil { return nil, clues.Wrap(err, "constructing a graph client") } @@ -25,13 +27,13 @@ func UsersMap( return au.GetAllIDsAndNames(ctx, errs) } -func makeUserAPI(acct account.Account) (api.Users, error) { +func makeUserAPI(acct account.Account, co control.Options) (api.Users, error) { creds, err := acct.M365Config() if err != nil { return 
api.Users{}, clues.Wrap(err, "getting m365 account creds") } - cli, err := api.NewClient(creds) + cli, err := api.NewClient(creds, co) if err != nil { return api.Users{}, clues.Wrap(err, "constructing api client") } diff --git a/src/cmd/getM365/exchange/get_item.go b/src/cmd/getM365/exchange/get_item.go deleted file mode 100644 index cc6e8cd6a..000000000 --- a/src/cmd/getM365/exchange/get_item.go +++ /dev/null @@ -1,157 +0,0 @@ -// get_item.go is a source file designed to retrieve an m365 object from an -// existing M365 account. Data displayed is representative of the current -// serialization abstraction versioning used by Microsoft Graph and stored by Corso. - -package exchange - -import ( - "context" - "fmt" - "os" - - "github.com/alcionai/clues" - "github.com/microsoft/kiota-abstractions-go/serialization" - kw "github.com/microsoft/kiota-serialization-json-go" - "github.com/spf13/cobra" - - "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/common/str" - "github.com/alcionai/corso/src/pkg/account" - "github.com/alcionai/corso/src/pkg/backup/details" - "github.com/alcionai/corso/src/pkg/credentials" - "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/path" - "github.com/alcionai/corso/src/pkg/services/m365/api" -) - -// Required inputs from user for command execution -var ( - user, tenant, m365ID, category string -) - -func AddCommands(parent *cobra.Command) { - exCmd := &cobra.Command{ - Use: "exchange", - Short: "Get an M365ID item JSON", - RunE: handleExchangeCmd, - } - - fs := exCmd.PersistentFlags() - fs.StringVar(&m365ID, "id", "", "m365 identifier for object") - fs.StringVar(&category, "category", "", "type of M365 data (contacts, email, events)") - fs.StringVar(&user, "user", "", "m365 user id of M365 user") - fs.StringVar(&tenant, "tenant", "", "m365 identifier for the tenant") - - cobra.CheckErr(exCmd.MarkPersistentFlagRequired("user")) - cobra.CheckErr(exCmd.MarkPersistentFlagRequired("id")) - cobra.CheckErr(exCmd.MarkPersistentFlagRequired("category")) - - parent.AddCommand(exCmd) -} - -func handleExchangeCmd(cmd *cobra.Command, args []string) error { - if utils.HasNoFlagsAndShownHelp(cmd) { - return nil - } - - tid := str.First(tenant, os.Getenv(account.AzureTenantID)) - - ctx := clues.Add( - cmd.Context(), - "item_id", m365ID, - "resource_owner", user, - "tenant", tid) - - creds := account.M365Config{ - M365: credentials.GetM365(), - AzureTenantID: tid, - } - - err := runDisplayM365JSON(ctx, creds, user, m365ID, fault.New(true)) - if err != nil { - cmd.SilenceUsage = true - cmd.SilenceErrors = true - - return clues.Wrap(err, "getting item") - } - - return nil -} - -func runDisplayM365JSON( - ctx context.Context, - creds account.M365Config, - user, itemID string, - errs *fault.Bus, -) error { - var ( - bs []byte - err error - cat = path.ToCategoryType(category) - sw = kw.NewJsonSerializationWriter() - ) - - ac, err := api.NewClient(creds) - if err != nil { - return err - } - - switch cat { - case path.EmailCategory: - bs, err = getItem(ctx, ac.Mail(), user, itemID, true, errs) - case path.EventsCategory: - bs, err = getItem(ctx, ac.Events(), user, itemID, true, errs) - case path.ContactsCategory: - bs, err = getItem(ctx, ac.Contacts(), user, itemID, true, errs) - default: - return fmt.Errorf("unable to process category: %s", cat) - } - - if err != nil { - return err - } - - err = sw.WriteStringValue("", ptr.To(string(bs))) - if err != nil { - return 
clues.Wrap(err, "Error writing string value: "+itemID) - } - - array, err := sw.GetSerializedContent() - if err != nil { - return clues.Wrap(err, "Error serializing item: "+itemID) - } - - fmt.Println(string(array)) - - return nil -} - -type itemer interface { - GetItem( - ctx context.Context, - user, itemID string, - immutableID bool, - errs *fault.Bus, - ) (serialization.Parsable, *details.ExchangeInfo, error) - Serialize( - ctx context.Context, - item serialization.Parsable, - user, itemID string, - ) ([]byte, error) -} - -func getItem( - ctx context.Context, - itm itemer, - user, itemID string, - immutableIDs bool, - errs *fault.Bus, -) ([]byte, error) { - sp, _, err := itm.GetItem(ctx, user, itemID, immutableIDs, errs) - if err != nil { - return nil, clues.Wrap(err, "getting item") - } - - return itm.Serialize(ctx, sp, user, itemID) -} diff --git a/src/cmd/getM365/main.go b/src/cmd/getM365/main.go deleted file mode 100644 index c7acd3175..000000000 --- a/src/cmd/getM365/main.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "context" - "os" - - "github.com/spf13/cobra" - - . "github.com/alcionai/corso/src/cli/print" - "github.com/alcionai/corso/src/cmd/getM365/exchange" - "github.com/alcionai/corso/src/cmd/getM365/onedrive" - "github.com/alcionai/corso/src/pkg/logger" -) - -var rootCmd = &cobra.Command{ - Use: "getM365", -} - -func main() { - ls := logger.Settings{ - Level: logger.LLDebug, - Format: logger.LFText, - } - ctx, _ := logger.CtxOrSeed(context.Background(), ls) - - ctx = SetRootCmd(ctx, rootCmd) - defer logger.Flush(ctx) - - exchange.AddCommands(rootCmd) - onedrive.AddCommands(rootCmd) - - if err := rootCmd.Execute(); err != nil { - Err(ctx, err) - os.Exit(1) - } -} diff --git a/src/cmd/getM365/onedrive/get_item.go b/src/cmd/getM365/onedrive/get_item.go deleted file mode 100644 index 05b5395ce..000000000 --- a/src/cmd/getM365/onedrive/get_item.go +++ /dev/null @@ -1,207 +0,0 @@ -// get_item.go is a source file designed to retrieve an m365 object from an -// existing M365 account. Data displayed is representative of the current -// serialization abstraction versioning used by Microsoft Graph and stored by Corso. - -package onedrive - -import ( - "context" - "encoding/json" - "io" - "net/http" - "os" - - "github.com/alcionai/clues" - "github.com/microsoft/kiota-abstractions-go/serialization" - kjson "github.com/microsoft/kiota-serialization-json-go" - "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/spf13/cobra" - - . 
"github.com/alcionai/corso/src/cli/print" - "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/common/str" - "github.com/alcionai/corso/src/internal/m365/graph" - "github.com/alcionai/corso/src/pkg/account" - "github.com/alcionai/corso/src/pkg/credentials" - "github.com/alcionai/corso/src/pkg/services/m365/api" -) - -const downloadURLKey = "@microsoft.graph.downloadUrl" - -// Required inputs from user for command execution -var ( - user, tenant, m365ID string -) - -func AddCommands(parent *cobra.Command) { - exCmd := &cobra.Command{ - Use: "onedrive", - Short: "Get an M365ID item", - RunE: handleOneDriveCmd, - } - - fs := exCmd.PersistentFlags() - fs.StringVar(&m365ID, "id", "", "m365 identifier for object") - fs.StringVar(&user, "user", "", "m365 user id of M365 user") - fs.StringVar(&tenant, "tenant", "", "m365 identifier for the tenant") - - cobra.CheckErr(exCmd.MarkPersistentFlagRequired("user")) - cobra.CheckErr(exCmd.MarkPersistentFlagRequired("id")) - - parent.AddCommand(exCmd) -} - -func handleOneDriveCmd(cmd *cobra.Command, args []string) error { - if utils.HasNoFlagsAndShownHelp(cmd) { - return nil - } - - tid := str.First(tenant, os.Getenv(account.AzureTenantID)) - - ctx := clues.Add( - cmd.Context(), - "item_id", m365ID, - "resource_owner", user, - "tenant", tid) - - // get account info - creds := account.M365Config{ - M365: credentials.GetM365(), - AzureTenantID: tid, - } - - gr := graph.NewNoTimeoutHTTPWrapper() - - ac, err := api.NewClient(creds) - if err != nil { - return Only(ctx, clues.Wrap(err, "getting api client")) - } - - err = runDisplayM365JSON(ctx, ac, gr, creds, user, m365ID) - if err != nil { - cmd.SilenceUsage = true - cmd.SilenceErrors = true - - return Only(ctx, clues.Wrap(err, "getting item")) - } - - return nil -} - -type itemData struct { - Size int `json:"size"` -} - -type itemPrintable struct { - Info json.RawMessage `json:"info"` - Permissions json.RawMessage `json:"permissions"` - Data itemData `json:"data"` -} - -func (i itemPrintable) MinimumPrintable() any { - return i -} - -func runDisplayM365JSON( - ctx context.Context, - ac api.Client, - gr graph.Requester, - creds account.M365Config, - userID, itemID string, -) error { - drive, err := ac.Users().GetDefaultDrive(ctx, userID) - if err != nil { - return err - } - - driveID := ptr.Val(drive.GetId()) - - it := itemPrintable{} - - item, err := ac.Drives().GetItem(ctx, driveID, itemID) - if err != nil { - return err - } - - if item != nil { - content, err := getDriveItemContent(ctx, gr, item) - if err != nil { - return err - } - - // We could get size from item.GetSize(), but the - // getDriveItemContent call is to ensure that we are able to - // download the file. 
- it.Data.Size = len(content) - } - - sInfo, err := serializeObject(item) - if err != nil { - return err - } - - err = json.Unmarshal([]byte(sInfo), &it.Info) - if err != nil { - return err - } - - perms, err := ac.Drives().GetItemPermission(ctx, driveID, itemID) - if err != nil { - return err - } - - sPerms, err := serializeObject(perms) - if err != nil { - return err - } - - err = json.Unmarshal([]byte(sPerms), &it.Permissions) - if err != nil { - return err - } - - PrettyJSON(ctx, it) - - return nil -} - -func serializeObject(data serialization.Parsable) (string, error) { - sw := kjson.NewJsonSerializationWriter() - - err := sw.WriteObjectValue("", data) - if err != nil { - return "", clues.Wrap(err, "writing serializing info") - } - - content, err := sw.GetSerializedContent() - if err != nil { - return "", clues.Wrap(err, "getting serializing info") - } - - return string(content), err -} - -func getDriveItemContent( - ctx context.Context, - gr graph.Requester, - item models.DriveItemable, -) ([]byte, error) { - url, ok := item.GetAdditionalData()[downloadURLKey].(*string) - if !ok { - return nil, clues.New("retrieving download url") - } - - resp, err := gr.Request(ctx, http.MethodGet, *url, nil, nil) - if err != nil { - return nil, clues.New("requesting item content").With("error", err) - } - defer resp.Body.Close() - - content, err := io.ReadAll(resp.Body) - if err != nil { - return nil, clues.New("reading item content").With("error", err) - } - - return content, nil -} diff --git a/src/go.mod b/src/go.mod index b589ca542..1c6091e91 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.305 + github.com/aws/aws-sdk-go v1.44.307 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.0 @@ -16,7 +16,7 @@ require ( github.com/kopia/kopia v0.12.2-0.20230327171220-747baeebdab1 github.com/microsoft/kiota-abstractions-go v1.1.0 github.com/microsoft/kiota-authentication-azure-go v1.0.0 - github.com/microsoft/kiota-http-go v1.0.0 + github.com/microsoft/kiota-http-go v1.0.1 github.com/microsoft/kiota-serialization-form-go v1.0.0 github.com/microsoft/kiota-serialization-json-go v1.0.4 github.com/microsoftgraph/msgraph-sdk-go v1.12.0 diff --git a/src/go.sum b/src/go.sum index 718084162..cd66bd27b 100644 --- a/src/go.sum +++ b/src/go.sum @@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.44.305 h1:fU/5lY3WyBjGU9fkmQYd8o4fZu+2RaOv/i+sPaJVvFg= -github.com/aws/aws-sdk-go v1.44.305/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.307 h1:2R0/EPgpZcFSUwZhYImq/srjaOrOfLv5MNRzrFyAM38= +github.com/aws/aws-sdk-go v1.44.307/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -275,8 +275,8 @@ 
github.com/microsoft/kiota-abstractions-go v1.1.0 h1:X1aKlsYCRs/0RSChr/fbq4j/+kx github.com/microsoft/kiota-abstractions-go v1.1.0/go.mod h1:RkxyZ5x87Njik7iVeQY9M2wtrrL1MJZcXiI/BxD/82g= github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk= github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw= -github.com/microsoft/kiota-http-go v1.0.0 h1:F1hd6gMlLeEgH2CkRB7z13ow7LxMKMWEmms/t0VfS+k= -github.com/microsoft/kiota-http-go v1.0.0/go.mod h1:eujxJliqodotsYepIc6ihhK+vXMMt5Q8YiSNL7+7M7U= +github.com/microsoft/kiota-http-go v1.0.1 h1:818u3aiLpxj35hZgfUSqphQ18IUTK3gVdTE4cQ5vjLw= +github.com/microsoft/kiota-http-go v1.0.1/go.mod h1:H0cg+ly+5ZSR8z4swj5ea9O/GB5ll2YuYeQ0/pJs7AY= github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI= github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA= github.com/microsoft/kiota-serialization-json-go v1.0.4 h1:5TaISWwd2Me8clrK7SqNATo0tv9seOq59y4I5953egQ= diff --git a/src/internal/events/events.go b/src/internal/events/events.go index baa2c2117..1252052f7 100644 --- a/src/internal/events/events.go +++ b/src/internal/events/events.go @@ -82,8 +82,8 @@ var ( RudderStackDataPlaneURL string ) -func NewBus(ctx context.Context, s storage.Storage, tenID string, opts control.Options) (Bus, error) { - if opts.DisableMetrics { +func NewBus(ctx context.Context, s storage.Storage, tenID string, co control.Options) (Bus, error) { + if co.DisableMetrics { return Bus{}, nil } diff --git a/src/internal/kopia/backup_bases.go b/src/internal/kopia/backup_bases.go index 0505fc829..c0b8ecfaa 100644 --- a/src/internal/kopia/backup_bases.go +++ b/src/internal/kopia/backup_bases.go @@ -24,7 +24,7 @@ type BackupBases interface { MergeBackupBases( ctx context.Context, other BackupBases, - reasonToKey func(Reason) string, + reasonToKey func(Reasoner) string, ) BackupBases } @@ -109,10 +109,10 @@ func (bb *backupBases) ClearAssistBases() { // some migration that disrupts lookup), and that the BackupBases used to call // this function contains the current version. // -// reasonToKey should be a function that, given a Reason, will produce some -// string that represents Reason in the context of the merge operation. For -// example, to merge BackupBases across a ResourceOwner migration, the Reason's -// service and category can be used as the key. +// reasonToKey should be a function that, given a Reasoner, will produce some +// string that represents Reasoner in the context of the merge operation. For +// example, to merge BackupBases across a ProtectedResource migration, the +// Reasoner's service and category can be used as the key. // // Selection priority, for each reason key generated by reasonsToKey, follows // these rules: @@ -125,7 +125,7 @@ func (bb *backupBases) ClearAssistBases() { func (bb *backupBases) MergeBackupBases( ctx context.Context, other BackupBases, - reasonToKey func(reason Reason) string, + reasonToKey func(reason Reasoner) string, ) BackupBases { if other == nil || (len(other.MergeBases()) == 0 && len(other.AssistBases()) == 0) { return bb @@ -159,7 +159,7 @@ func (bb *backupBases) MergeBackupBases( // Calculate the set of mergeBases to pull from other into this one. 
for _, m := range other.MergeBases() { - useReasons := []Reason{} + useReasons := []Reasoner{} for _, r := range m.Reasons { k := reasonToKey(r) @@ -210,7 +210,7 @@ func (bb *backupBases) MergeBackupBases( // Add assistBases from other to this one as needed. for _, m := range other.AssistBases() { - useReasons := []Reason{} + useReasons := []Reasoner{} // Assume that all complete manifests in assist overlap with MergeBases. if len(m.IncompleteReason) == 0 { @@ -267,8 +267,8 @@ func findNonUniqueManifests( } for _, reason := range man.Reasons { - reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String() - reasons[reasonKey] = append(reasons[reasonKey], man) + mapKey := reasonKey(reason) + reasons[mapKey] = append(reasons[mapKey], man) } } diff --git a/src/internal/kopia/backup_bases_test.go b/src/internal/kopia/backup_bases_test.go index f902d4e37..04afb5408 100644 --- a/src/internal/kopia/backup_bases_test.go +++ b/src/internal/kopia/backup_bases_test.go @@ -16,7 +16,7 @@ import ( "github.com/alcionai/corso/src/pkg/path" ) -func makeManifest(id, incmpl, bID string, reasons ...Reason) ManifestEntry { +func makeManifest(id, incmpl, bID string, reasons ...Reasoner) ManifestEntry { bIDKey, _ := makeTagKV(TagBackupID) return ManifestEntry{ @@ -223,14 +223,10 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() { ir = "checkpoint" } - reasons := make([]Reason, 0, len(i.cat)) + reasons := make([]Reasoner, 0, len(i.cat)) for _, c := range i.cat { - reasons = append(reasons, Reason{ - ResourceOwner: ro, - Service: path.ExchangeService, - Category: c, - }) + reasons = append(reasons, NewReason("", ro, path.ExchangeService, c)) } m := makeManifest(baseID, ir, "b"+baseID, reasons...) @@ -457,8 +453,8 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() { got := bb.MergeBackupBases( ctx, other, - func(reason Reason) string { - return reason.Service.String() + reason.Category.String() + func(r Reasoner) string { + return r.Service().String() + r.Category().String() }) AssertBackupBasesEqual(t, expect, got) }) @@ -469,13 +465,8 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() { ro := "resource_owner" makeMan := func(pct path.CategoryType, id, incmpl, bID string) ManifestEntry { - reason := Reason{ - ResourceOwner: ro, - Service: path.ExchangeService, - Category: pct, - } - - return makeManifest(id, incmpl, bID, reason) + r := NewReason("", ro, path.ExchangeService, pct) + return makeManifest(id, incmpl, bID, r) } // Make a function so tests can modify things without messing with each other. 
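Before the next hunk, a rough sketch of how the Reason-to-Reasoner pieces in this change compose for the ProtectedResource migration described in the MergeBackupBases doc comment. This is an illustrative example only: the function name mergeAcrossMigration and the resourceID/oldBases values are hypothetical, while NewReason, FindBases, and MergeBackupBases come from this diff.

```go
package example

import (
	"context"

	"github.com/alcionai/corso/src/internal/kopia"
	"github.com/alcionai/corso/src/internal/kopia/inject"
	"github.com/alcionai/corso/src/pkg/path"
)

// mergeAcrossMigration (hypothetical) finds bases for a protected resource
// under its new ID, then merges in bases found under its pre-migration ID.
func mergeAcrossMigration(
	ctx context.Context,
	bf inject.BaseFinder,
	oldBases kopia.BackupBases,
	resourceID string,
) kopia.BackupBases {
	// NewReason replaces the old Reason struct literal; the empty first
	// argument is the tenant, mirroring how the tests in this change
	// construct Reasoners.
	r := kopia.NewReason("", resourceID, path.ExchangeService, path.EmailCategory)

	bases := bf.FindBases(ctx, []kopia.Reasoner{r}, nil)

	// Key only on service+category so bases owned by the old and new
	// resource IDs land on the same key, letting MergeBackupBases choose
	// between them. This mirrors the closure used in the tests below.
	return bases.MergeBackupBases(ctx, oldBases, func(r kopia.Reasoner) string {
		return r.Service().String() + r.Category().String()
	})
}
```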
@@ -606,11 +597,7 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() { res := validMail1() res.mergeBases[0].Reasons = append( res.mergeBases[0].Reasons, - Reason{ - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }) + NewReason("", ro, path.ExchangeService, path.ContactsCategory)) res.assistBases = res.mergeBases return res @@ -619,11 +606,7 @@ res := validMail1() res.mergeBases[0].Reasons = append( res.mergeBases[0].Reasons, - Reason{ - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }) + NewReason("", ro, path.ExchangeService, path.ContactsCategory)) res.assistBases = res.mergeBases return res diff --git a/src/internal/kopia/base_finder.go b/src/internal/kopia/base_finder.go index 9ac651512..00561c833 100644 --- a/src/internal/kopia/base_finder.go +++ b/src/internal/kopia/base_finder.go @@ -29,39 +29,94 @@ const ( userTagPrefix = "tag:" ) -type Reason struct { - ResourceOwner string - Service path.ServiceType - Category path.CategoryType +// TODO(ashmrtn): Move this into some inject package. Here to avoid import +// cycles. +type Reasoner interface { + Tenant() string + ProtectedResource() string + Service() path.ServiceType + Category() path.CategoryType + // SubtreePath returns the path prefix for data in existing backups that have + // parameters (tenant, protected resource, etc) that match this Reasoner. + SubtreePath() (path.Path, error) } -func (r Reason) TagKeys() []string { - return []string{ - r.ResourceOwner, - serviceCatString(r.Service, r.Category), +func NewReason( + tenant, resource string, + service path.ServiceType, + category path.CategoryType, +) Reasoner { + return reason{ + tenant: tenant, + resource: resource, + service: service, + category: category, } } -// Key is the concatenation of the ResourceOwner, Service, and Category. -func (r Reason) Key() string { - return r.ResourceOwner + r.Service.String() + r.Category.String() +type reason struct { + // tenant appears here so that when this is moved to an inject package nothing + // needs to change. However, kopia itself is blind to the fields in the reason + // struct and relies on helper functions to get the information it needs. + tenant string + resource string + service path.ServiceType + category path.CategoryType +} + +func (r reason) Tenant() string { + return r.tenant +} + +func (r reason) ProtectedResource() string { + return r.resource +} + +func (r reason) Service() path.ServiceType { + return r.service +} + +func (r reason) Category() path.CategoryType { + return r.category +} + +func (r reason) SubtreePath() (path.Path, error) { + p, err := path.ServicePrefix( + r.Tenant(), + r.ProtectedResource(), + r.Service(), + r.Category()) + + return p, clues.Wrap(err, "building path").OrNil() +} + +func tagKeys(r Reasoner) []string { + return []string{ + r.ProtectedResource(), + serviceCatString(r.Service(), r.Category()), + } +} + +// reasonKey returns the concatenation of the ProtectedResource, Service, and Category. +func reasonKey(r Reasoner) string { + return r.ProtectedResource() + r.Service().String() + r.Category().String() } type BackupEntry struct { *backup.Backup - Reasons []Reason + Reasons []Reasoner } type ManifestEntry struct { *snapshot.Manifest - // Reason contains the ResourceOwners and Service/Categories that caused this + // Reasons contains the ResourceOwners and Service/Categories that caused this // snapshot to be selected as a base. 
We can't reuse OwnersCats here because // it's possible some ResourceOwners will have a subset of the Categories as // the reason for selecting a snapshot. For example: // 1. backup user1 email,contacts -> B1 // 2. backup user1 contacts -> B2 (uses B1 as base) // 3. backup user1 email,contacts,events (uses B1 for email, B2 for contacts) - Reasons []Reason + Reasons []Reasoner } func (me ManifestEntry) GetTag(key string) (string, bool) { @@ -157,7 +212,7 @@ func (b *baseFinder) getBackupModel( // most recent complete backup as the base. func (b *baseFinder) findBasesInSet( ctx context.Context, - reason Reason, + reason Reasoner, metas []*manifest.EntryMetadata, ) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) { // Sort manifests by time so we can go through them sequentially. The code in @@ -190,7 +245,7 @@ func (b *baseFinder) findBasesInSet( kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{ Manifest: man, - Reasons: []Reason{reason}, + Reasons: []Reasoner{reason}, }) logger.Ctx(ictx).Info("found incomplete backup") @@ -211,7 +266,7 @@ func (b *baseFinder) findBasesInSet( kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{ Manifest: man, - Reasons: []Reason{reason}, + Reasons: []Reasoner{reason}, }) logger.Ctx(ictx).Info("found incomplete backup") @@ -235,7 +290,7 @@ func (b *baseFinder) findBasesInSet( kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{ Manifest: man, - Reasons: []Reason{reason}, + Reasons: []Reasoner{reason}, }) logger.Ctx(ictx).Infow( @@ -253,13 +308,13 @@ func (b *baseFinder) findBasesInSet( me := ManifestEntry{ Manifest: man, - Reasons: []Reason{reason}, + Reasons: []Reasoner{reason}, } kopiaAssistSnaps = append(kopiaAssistSnaps, me) return &BackupEntry{ Backup: bup, - Reasons: []Reason{reason}, + Reasons: []Reasoner{reason}, }, &me, kopiaAssistSnaps, nil } @@ -270,12 +325,12 @@ func (b *baseFinder) findBasesInSet( func (b *baseFinder) getBase( ctx context.Context, - reason Reason, + r Reasoner, tags map[string]string, ) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) { allTags := map[string]string{} - for _, k := range reason.TagKeys() { + for _, k := range tagKeys(r) { allTags[k] = "" } @@ -292,12 +347,12 @@ func (b *baseFinder) getBase( return nil, nil, nil, nil } - return b.findBasesInSet(ctx, reason, metas) + return b.findBasesInSet(ctx, r, metas) } func (b *baseFinder) FindBases( ctx context.Context, - reasons []Reason, + reasons []Reasoner, tags map[string]string, ) BackupBases { var ( @@ -310,14 +365,14 @@ func (b *baseFinder) FindBases( kopiaAssistSnaps = map[manifest.ID]ManifestEntry{} ) - for _, reason := range reasons { + for _, searchReason := range reasons { ictx := clues.Add( ctx, - "search_service", reason.Service.String(), - "search_category", reason.Category.String()) + "search_service", searchReason.Service().String(), + "search_category", searchReason.Category().String()) logger.Ctx(ictx).Info("searching for previous manifests") - baseBackup, baseSnap, assistSnaps, err := b.getBase(ictx, reason, tags) + baseBackup, baseSnap, assistSnaps, err := b.getBase(ictx, searchReason, tags) if err != nil { logger.Ctx(ctx).Info( "getting base, falling back to full backup for reason", diff --git a/src/internal/kopia/base_finder_test.go b/src/internal/kopia/base_finder_test.go index f76b3c81a..cb3239ca1 100644 --- a/src/internal/kopia/base_finder_test.go +++ b/src/internal/kopia/base_finder_test.go @@ -39,61 +39,24 @@ var ( testUser2 = "user2" testUser3 = "user3" - testAllUsersAllCats = []Reason{ - { - ResourceOwner: 
testUser1, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser1, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - { - ResourceOwner: testUser2, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser2, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - { - ResourceOwner: testUser3, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser3, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, + testAllUsersAllCats = []Reasoner{ + // User1 email and events. + NewReason("", testUser1, path.ExchangeService, path.EmailCategory), + NewReason("", testUser1, path.ExchangeService, path.EventsCategory), + // User2 email and events. + NewReason("", testUser2, path.ExchangeService, path.EmailCategory), + NewReason("", testUser2, path.ExchangeService, path.EventsCategory), + // User3 email and events. + NewReason("", testUser3, path.ExchangeService, path.EmailCategory), + NewReason("", testUser3, path.ExchangeService, path.EventsCategory), } - testAllUsersMail = []Reason{ - { - ResourceOwner: testUser1, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser2, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser3, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + testAllUsersMail = []Reasoner{ + NewReason("", testUser1, path.ExchangeService, path.EmailCategory), + NewReason("", testUser2, path.ExchangeService, path.EmailCategory), + NewReason("", testUser3, path.ExchangeService, path.EmailCategory), } - testUser1Mail = []Reason{ - { - ResourceOwner: testUser1, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + testUser1Mail = []Reasoner{ + NewReason("", testUser1, path.ExchangeService, path.EmailCategory), } ) @@ -322,12 +285,8 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() { sm: mockEmptySnapshotManager{}, bg: mockEmptyModelGetter{}, } - reasons := []Reason{ - { - ResourceOwner: "a-user", - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons := []Reasoner{ + NewReason("", "a-user", path.ExchangeService, path.EmailCategory), } bb := bf.FindBases(ctx, reasons, nil) @@ -345,12 +304,8 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() { sm: &mockSnapshotManager{findErr: assert.AnError}, bg: mockEmptyModelGetter{}, } - reasons := []Reason{ - { - ResourceOwner: "a-user", - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons := []Reasoner{ + NewReason("", "a-user", path.ExchangeService, path.EmailCategory), } bb := bf.FindBases(ctx, reasons, nil) @@ -361,14 +316,14 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() { func (suite *BaseFinderUnitSuite) TestGetBases() { table := []struct { name string - input []Reason + input []Reasoner manifestData []manifestInfo // Use this to denote the Reasons a base backup or base manifest is // selected. The int maps to the index of the backup or manifest in data. - expectedBaseReasons map[int][]Reason + expectedBaseReasons map[int][]Reasoner // Use this to denote the Reasons a kopia assised incrementals manifest is // selected. The int maps to the index of the manifest in data. 
- expectedAssistManifestReasons map[int][]Reason + expectedAssistManifestReasons map[int][]Reasoner backupData []backupInfo }{ { @@ -394,10 +349,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 1: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 1: testUser1Mail, }, backupData: []backupInfo{ @@ -428,10 +383,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 1: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: testUser1Mail, 1: testUser1Mail, }, @@ -463,10 +418,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 1: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: testUser1Mail, 1: testUser1Mail, }, @@ -492,10 +447,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser3, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 0: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: testUser1Mail, }, backupData: []backupInfo{ @@ -519,10 +474,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser3, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 0: testAllUsersAllCats, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: testAllUsersAllCats, }, backupData: []backupInfo{ @@ -557,76 +512,28 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser3, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 0: { - { - ResourceOwner: testUser1, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser2, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser3, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + NewReason("", testUser1, path.ExchangeService, path.EmailCategory), + NewReason("", testUser2, path.ExchangeService, path.EmailCategory), + NewReason("", testUser3, path.ExchangeService, path.EmailCategory), }, 1: { - Reason{ - ResourceOwner: testUser1, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - Reason{ - ResourceOwner: testUser2, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - Reason{ - ResourceOwner: testUser3, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, + NewReason("", testUser1, path.ExchangeService, path.EventsCategory), + NewReason("", testUser2, path.ExchangeService, path.EventsCategory), + NewReason("", testUser3, path.ExchangeService, path.EventsCategory), }, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: { - { - ResourceOwner: testUser1, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser2, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: testUser3, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + NewReason("", testUser1, 
path.ExchangeService, path.EmailCategory), + NewReason("", testUser2, path.ExchangeService, path.EmailCategory), + NewReason("", testUser3, path.ExchangeService, path.EmailCategory), }, 1: { - Reason{ - ResourceOwner: testUser1, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - Reason{ - ResourceOwner: testUser2, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - Reason{ - ResourceOwner: testUser3, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, + NewReason("", testUser1, path.ExchangeService, path.EventsCategory), + NewReason("", testUser2, path.ExchangeService, path.EventsCategory), + NewReason("", testUser3, path.ExchangeService, path.EventsCategory), }, }, backupData: []backupInfo{ @@ -657,10 +564,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 0: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: testUser1Mail, 1: testUser1Mail, }, @@ -693,10 +600,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 1: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 1: testUser1Mail, }, backupData: []backupInfo{ @@ -728,8 +635,8 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{}, - expectedAssistManifestReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{}, + expectedAssistManifestReasons: map[int][]Reasoner{ 1: testUser1Mail, }, backupData: []backupInfo{ @@ -752,10 +659,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 0: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: testUser1Mail, }, backupData: []backupInfo{ @@ -787,10 +694,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() { testUser1, ), }, - expectedBaseReasons: map[int][]Reason{ + expectedBaseReasons: map[int][]Reasoner{ 0: testUser1Mail, }, - expectedAssistManifestReasons: map[int][]Reason{ + expectedAssistManifestReasons: map[int][]Reasoner{ 0: testUser1Mail, }, backupData: []backupInfo{ @@ -857,17 +764,17 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() { table := []struct { name string - input []Reason + input []Reasoner tags map[string]string // Use this to denote which manifests in data should be expected. Allows // defining data in a table while not repeating things between data and // expected. 
- expectedIdxs map[int][]Reason + expectedIdxs map[int][]Reasoner }{ { name: "no tags specified", tags: nil, - expectedIdxs: map[int][]Reason{ + expectedIdxs: map[int][]Reasoner{ 0: testUser1Mail, }, }, @@ -877,14 +784,14 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() { "fnords": "", "smarf": "", }, - expectedIdxs: map[int][]Reason{ + expectedIdxs: map[int][]Reasoner{ 0: testUser1Mail, }, }, { name: "subset of custom tags", tags: map[string]string{"fnords": ""}, - expectedIdxs: map[int][]Reason{ + expectedIdxs: map[int][]Reasoner{ 0: testUser1Mail, }, }, @@ -925,7 +832,7 @@ func checkManifestEntriesMatch( t *testing.T, retSnaps []ManifestEntry, allExpected []manifestInfo, - expectedIdxsAndReasons map[int][]Reason, + expectedIdxsAndReasons map[int][]Reasoner, ) { // Check the proper snapshot manifests were returned. expected := make([]*snapshot.Manifest, 0, len(expectedIdxsAndReasons)) @@ -941,7 +848,7 @@ func checkManifestEntriesMatch( assert.ElementsMatch(t, expected, got) // Check the reasons for selecting each manifest are correct. - expectedReasons := make(map[manifest.ID][]Reason, len(expectedIdxsAndReasons)) + expectedReasons := make(map[manifest.ID][]Reasoner, len(expectedIdxsAndReasons)) for idx, reasons := range expectedIdxsAndReasons { expectedReasons[allExpected[idx].man.ID] = reasons } @@ -967,7 +874,7 @@ func checkBackupEntriesMatch( t *testing.T, retBups []BackupEntry, allExpected []backupInfo, - expectedIdxsAndReasons map[int][]Reason, + expectedIdxsAndReasons map[int][]Reasoner, ) { // Check the proper snapshot manifests were returned. expected := make([]*backup.Backup, 0, len(expectedIdxsAndReasons)) @@ -983,7 +890,7 @@ func checkBackupEntriesMatch( assert.ElementsMatch(t, expected, got) // Check the reasons for selecting each manifest are correct. 
- expectedReasons := make(map[model.StableID][]Reason, len(expectedIdxsAndReasons)) + expectedReasons := make(map[model.StableID][]Reasoner, len(expectedIdxsAndReasons)) for idx, reasons := range expectedIdxsAndReasons { expectedReasons[allExpected[idx].b.ID] = reasons } diff --git a/src/internal/kopia/conn.go b/src/internal/kopia/conn.go index d28001f3f..e9d20918a 100644 --- a/src/internal/kopia/conn.go +++ b/src/internal/kopia/conn.go @@ -12,12 +12,16 @@ import ( "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/compression" "github.com/kopia/kopia/repo/content" + "github.com/kopia/kopia/repo/format" + "github.com/kopia/kopia/repo/maintenance" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot/policy" "github.com/kopia/kopia/snapshot/snapshotfs" "github.com/pkg/errors" + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/kopia/retention" "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/storage" ) @@ -326,12 +330,12 @@ func updateCompressionOnPolicy(compressor string, p *policy.Policy) (bool, error return true, nil } -func updateRetentionOnPolicy(retention policy.RetentionPolicy, p *policy.Policy) bool { - if retention == p.RetentionPolicy { +func updateRetentionOnPolicy(retPolicy policy.RetentionPolicy, p *policy.Policy) bool { + if retPolicy == p.RetentionPolicy { return false } - p.RetentionPolicy = retention + p.RetentionPolicy = retPolicy return true } @@ -410,6 +414,118 @@ func checkCompressor(compressor compression.Name) error { return clues.Stack(clues.New("unknown compressor type"), clues.New(string(compressor))) } +func (w *conn) setRetentionParameters( + ctx context.Context, + rrOpts repository.Retention, +) error { + if rrOpts.Mode == nil && rrOpts.Duration == nil && rrOpts.Extend == nil { + return nil + } + + // Somewhat confusing case: when we have no retention but a non-zero duration + // it acts like we passed in only the duration and returns an error about + // having to set both. Return a clearer error here instead. + if ptr.Val(rrOpts.Mode) == repository.NoRetention && ptr.Val(rrOpts.Duration) != 0 { + return clues.New("duration must be 0 if retention is disabled").WithClues(ctx) + } + + dr, ok := w.Repository.(repo.DirectRepository) + if !ok { + return clues.New("getting handle to repo").WithClues(ctx) + } + + blobCfg, params, err := getRetentionConfigs(ctx, dr) + if err != nil { + return clues.Stack(err) + } + + opts := retention.OptsFromConfigs(*blobCfg, *params) + if err := opts.Set(rrOpts); err != nil { + return clues.Stack(err).WithClues(ctx) + } + + return clues.Stack(persistRetentionConfigs(ctx, dr, opts)).OrNil() +} + +func getRetentionConfigs( + ctx context.Context, + dr repo.DirectRepository, +) (*format.BlobStorageConfiguration, *maintenance.Params, error) { + blobCfg, err := dr.FormatManager().BlobCfgBlob() + if err != nil { + return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx) + } + + params, err := maintenance.GetParams(ctx, dr) + if err != nil { + return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx) + } + + return &blobCfg, params, nil +} + +func persistRetentionConfigs( + ctx context.Context, + dr repo.DirectRepository, + opts *retention.Opts, +) error { + // Persist changes. 
+ if !opts.BlobChanged() && !opts.ParamsChanged() { + return nil + } + + blobCfg, params, err := opts.AsConfigs(ctx) + if err != nil { + return clues.Stack(err) + } + + mp, err := dr.FormatManager().GetMutableParameters() + if err != nil { + return clues.Wrap(err, "getting mutable parameters").WithClues(ctx) + } + + requiredFeatures, err := dr.FormatManager().RequiredFeatures() + if err != nil { + return clues.Wrap(err, "getting required features").WithClues(ctx) + } + + // Must be the case that only blob changed. + if !opts.ParamsChanged() { + return clues.Wrap( + dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures), + "persisting storage config", + ).WithClues(ctx).OrNil() + } + + // Both blob and maintenance changed. A DirectWriteSession is required to + // update the maintenance config but not the blob config. + err = repo.DirectWriteSession( + ctx, + dr, + repo.WriteSessionOptions{ + Purpose: "Corso immutable backups config", + }, + func(ctx context.Context, dw repo.DirectRepositoryWriter) error { + // Set the maintenance config first as we can bail out of the write + // session later. + if err := maintenance.SetParams(ctx, dw, ¶ms); err != nil { + return clues.Wrap(err, "maintenance config"). + WithClues(ctx) + } + + if !opts.BlobChanged() { + return nil + } + + return clues.Wrap( + dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures), + "storage config", + ).WithClues(ctx).OrNil() + }) + + return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil() +} + func (w *conn) LoadSnapshot( ctx context.Context, id manifest.ID, diff --git a/src/internal/kopia/inject/inject.go b/src/internal/kopia/inject/inject.go index 6921c353d..5d8dd3bc7 100644 --- a/src/internal/kopia/inject/inject.go +++ b/src/internal/kopia/inject/inject.go @@ -15,7 +15,8 @@ type ( BackupConsumer interface { ConsumeBackupCollections( ctx context.Context, - bases []kopia.IncrementalBase, + backupReasons []kopia.Reasoner, + bases kopia.BackupBases, cs []data.BackupCollection, pmr prefixmatcher.StringSetReader, tags map[string]string, @@ -37,7 +38,7 @@ type ( BaseFinder interface { FindBases( ctx context.Context, - reasons []kopia.Reason, + reasons []kopia.Reasoner, tags map[string]string, ) kopia.BackupBases } diff --git a/src/internal/kopia/merge_collection.go b/src/internal/kopia/merge_collection.go index ff32c4e73..25897fd58 100644 --- a/src/internal/kopia/merge_collection.go +++ b/src/internal/kopia/merge_collection.go @@ -70,7 +70,9 @@ func (mc *mergeCollection) Items( for _, c := range mc.cols { // Unfortunately doesn't seem to be a way right now to see if the // iteration failed and we should be exiting early. 
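 // (path.LoggableDir below presumably formats the storage path in a
 // PII-safe way for logging; previously the raw path was logged.)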
- ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath) + ictx := clues.Add( + ctx, + "merged_collection_storage_path", path.LoggableDir(c.storagePath)) logger.Ctx(ictx).Debug("sending items from merged collection") for item := range c.Items(ictx, errs) { @@ -95,7 +97,9 @@ func (mc *mergeCollection) FetchItemByName( "merged_collection_count", len(mc.cols)) for _, c := range mc.cols { - ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath) + ictx := clues.Add( + ctx, + "merged_collection_storage_path", path.LoggableDir(c.storagePath)) logger.Ctx(ictx).Debug("looking for item in merged collection") diff --git a/src/internal/kopia/retention/opts.go b/src/internal/kopia/retention/opts.go new file mode 100644 index 000000000..b63a6a6a3 --- /dev/null +++ b/src/internal/kopia/retention/opts.go @@ -0,0 +1,139 @@ +package retention + +import ( + "context" + "time" + + "github.com/alcionai/clues" + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/format" + "github.com/kopia/kopia/repo/maintenance" + + "github.com/alcionai/corso/src/pkg/control/repository" +) + +type Opts struct { + blobCfg format.BlobStorageConfiguration + params maintenance.Params + + blobChanged bool + paramsChanged bool +} + +func NewOpts() *Opts { + return &Opts{} +} + +func OptsFromConfigs( + blobCfg format.BlobStorageConfiguration, + params maintenance.Params, +) *Opts { + return &Opts{ + blobCfg: blobCfg, + params: params, + } +} + +func (r *Opts) AsConfigs( + ctx context.Context, +) (format.BlobStorageConfiguration, maintenance.Params, error) { + // Check the new config is valid. + if r.blobCfg.IsRetentionEnabled() { + if err := maintenance.CheckExtendRetention(ctx, r.blobCfg, &r.params); err != nil { + return format.BlobStorageConfiguration{}, maintenance.Params{}, clues.Wrap( + err, + "invalid retention config", + ).WithClues(ctx) + } + } + + return r.blobCfg, r.params, nil +} + +func (r *Opts) BlobChanged() bool { + return r.blobChanged +} + +func (r *Opts) ParamsChanged() bool { + return r.paramsChanged +} + +func (r *Opts) Set(opts repository.Retention) error { + r.setMaintenanceParams(opts.Extend) + + return clues.Wrap( + r.setBlobConfigParams(opts.Mode, opts.Duration), + "setting mode or duration", + ).OrNil() +} + +func (r *Opts) setMaintenanceParams(extend *bool) { + if extend != nil && r.params.ExtendObjectLocks != *extend { + r.params.ExtendObjectLocks = *extend + r.paramsChanged = true + } +} + +func (r *Opts) setBlobConfigParams( + mode *repository.RetentionMode, + duration *time.Duration, +) error { + err := r.setBlobConfigMode(mode) + if err != nil { + return clues.Stack(err) + } + + r.setBlobConfigDuration(duration) + + return nil +} + +func (r *Opts) setBlobConfigDuration(duration *time.Duration) { + if duration != nil && r.blobCfg.RetentionPeriod != *duration { + r.blobCfg.RetentionPeriod = *duration + r.blobChanged = true + } +} + +func (r *Opts) setBlobConfigMode( + mode *repository.RetentionMode, +) error { + if mode == nil { + return nil + } + + startMode := r.blobCfg.RetentionMode + + switch *mode { + case repository.NoRetention: + if !r.blobCfg.IsRetentionEnabled() { + return nil + } + + r.blobCfg.RetentionMode = "" + r.blobCfg.RetentionPeriod = 0 + + case repository.GovernanceRetention: + r.blobCfg.RetentionMode = blob.Governance + + case repository.ComplianceRetention: + r.blobCfg.RetentionMode = blob.Compliance + + default: + return clues.New("unknown retention mode"). 
+ With("provided_retention_mode", mode.String()) + } + + // Only check if the retention mode is not empty. IsValid errors out if it's + // empty. + if len(r.blobCfg.RetentionMode) > 0 && !r.blobCfg.RetentionMode.IsValid() { + return clues.New("invalid retention mode"). + With("retention_mode", r.blobCfg.RetentionMode) + } + + // Take into account previous operations on r that could have already updated + // blobChanged. + r.blobChanged = r.blobChanged || startMode != r.blobCfg.RetentionMode + + return nil +} diff --git a/src/internal/kopia/retention/opts_test.go b/src/internal/kopia/retention/opts_test.go new file mode 100644 index 000000000..8b250c79a --- /dev/null +++ b/src/internal/kopia/retention/opts_test.go @@ -0,0 +1,204 @@ +package retention_test + +import ( + "testing" + "time" + + "github.com/alcionai/clues" + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/format" + "github.com/kopia/kopia/repo/maintenance" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/kopia/retention" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/control/repository" +) + +type OptsUnitSuite struct { + tester.Suite +} + +func TestOptsUnitSuite(t *testing.T) { + suite.Run(t, &OptsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *OptsUnitSuite) TestOptsFromConfigs() { + var ( + t = suite.T() + + mode = blob.Governance + duration = time.Hour * 48 + extend = true + + blobCfgInput = format.BlobStorageConfiguration{ + RetentionMode: mode, + RetentionPeriod: duration, + } + paramsInput = maintenance.Params{ExtendObjectLocks: extend} + ) + + ctx, flush := tester.NewContext(t) + defer flush() + + opts := retention.OptsFromConfigs(blobCfgInput, paramsInput) + + assert.False(t, opts.BlobChanged(), "BlobChanged") + assert.False(t, opts.ParamsChanged(), "ParamsChanged") + + blobCfg, params, err := opts.AsConfigs(ctx) + require.NoError(t, err, "AsConfigs: %v", clues.ToCore(err)) + assert.Equal(t, blobCfgInput, blobCfg) + assert.Equal(t, paramsInput, params) +} + +func (suite *OptsUnitSuite) TestSet() { + var ( + kopiaMode = blob.Governance + mode = repository.GovernanceRetention + duration = time.Hour * 48 + ) + + table := []struct { + name string + inputBlob format.BlobStorageConfiguration + inputParams maintenance.Params + ctrlOpts repository.Retention + setErr require.ErrorAssertionFunc + expectMode blob.RetentionMode + expectDuration time.Duration + expectExtend bool + expectBlobChanged bool + expectParamsChanged bool + }{ + { + name: "All Nils", + setErr: require.NoError, + }, + { + name: "All Off", + ctrlOpts: repository.Retention{ + Mode: ptr.To(repository.NoRetention), + Duration: ptr.To(time.Duration(0)), + Extend: ptr.To(false), + }, + setErr: require.NoError, + }, + { + name: "UnknownRetention", + ctrlOpts: repository.Retention{ + Mode: ptr.To(repository.UnknownRetention), + Duration: ptr.To(duration), + }, + setErr: require.Error, + }, + { + name: "Invalid Retention Mode", + ctrlOpts: repository.Retention{ + Mode: ptr.To(repository.RetentionMode(-1)), + Duration: ptr.To(duration), + }, + setErr: require.Error, + }, + { + name: "Valid Set All", + ctrlOpts: repository.Retention{ + Mode: ptr.To(mode), + Duration: ptr.To(duration), + Extend: ptr.To(true), + }, + setErr: require.NoError, + expectMode: kopiaMode, + expectDuration: duration, + expectExtend: true, + expectBlobChanged: 
true, + expectParamsChanged: true, + }, + { + name: "Valid Set BlobConfig", + ctrlOpts: repository.Retention{ + Mode: ptr.To(mode), + Duration: ptr.To(duration), + }, + setErr: require.NoError, + expectMode: kopiaMode, + expectDuration: duration, + expectBlobChanged: true, + }, + { + name: "Valid Set Params", + ctrlOpts: repository.Retention{ + Extend: ptr.To(true), + }, + setErr: require.NoError, + expectExtend: true, + expectParamsChanged: true, + }, + { + name: "Partial BlobConfig Change", + inputBlob: format.BlobStorageConfiguration{ + RetentionMode: kopiaMode, + RetentionPeriod: duration, + }, + ctrlOpts: repository.Retention{ + Duration: ptr.To(duration + time.Hour), + }, + setErr: require.NoError, + expectMode: kopiaMode, + expectDuration: duration + time.Hour, + expectBlobChanged: true, + }, + { + name: "No BlobConfig Change", + inputBlob: format.BlobStorageConfiguration{ + RetentionMode: kopiaMode, + RetentionPeriod: duration, + }, + ctrlOpts: repository.Retention{ + Mode: ptr.To(mode), + Duration: ptr.To(duration), + }, + setErr: require.NoError, + expectMode: kopiaMode, + expectDuration: duration, + }, + { + name: "No Params Change", + inputParams: maintenance.Params{ExtendObjectLocks: true}, + ctrlOpts: repository.Retention{ + Extend: ptr.To(true), + }, + setErr: require.NoError, + expectExtend: true, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + opts := retention.OptsFromConfigs(test.inputBlob, test.inputParams) + err := opts.Set(test.ctrlOpts) + test.setErr(t, err, "setting params: %v", clues.ToCore(err)) + + if err != nil { + return + } + + blobCfg, params, err := opts.AsConfigs(ctx) + require.NoError(t, err, "getting configs: %v", clues.ToCore(err)) + + assert.Equal(t, test.expectMode, blobCfg.RetentionMode, "mode") + assert.Equal(t, test.expectDuration, blobCfg.RetentionPeriod, "duration") + assert.Equal(t, test.expectExtend, params.ExtendObjectLocks, "extend locks") + assert.Equal(t, test.expectBlobChanged, opts.BlobChanged(), "blob changed") + assert.Equal(t, test.expectParamsChanged, opts.ParamsChanged(), "params changed") + }) + } +} diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index 8a91367c6..8be75009f 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -20,6 +20,7 @@ import ( "github.com/kopia/kopia/fs/virtualfs" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot/snapshotfs" + "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/data" @@ -970,10 +971,32 @@ func traverseBaseDir( return nil } +func logBaseInfo(ctx context.Context, m ManifestEntry) { + svcs := map[string]struct{}{} + cats := map[string]struct{}{} + + for _, r := range m.Reasons { + svcs[r.Service().String()] = struct{}{} + cats[r.Category().String()] = struct{}{} + } + + mbID, _ := m.GetTag(TagBackupID) + if len(mbID) == 0 { + mbID = "no_backup_id_tag" + } + + logger.Ctx(ctx).Infow( + "using base for backup", + "base_snapshot_id", m.ID, + "services", maps.Keys(svcs), + "categories", maps.Keys(cats), + "base_backup_id", mbID) +} + func inflateBaseTree( ctx context.Context, loader snapshotLoader, - snap IncrementalBase, + snap ManifestEntry, updatedPaths map[string]path.Path, roots map[string]*treeMap, ) error { @@ -996,13 +1019,25 @@ func inflateBaseTree( return clues.New("snapshot root is not a directory").WithClues(ctx) } + // Some logging to help 
track things. + logBaseInfo(ctx, snap) + // For each subtree corresponding to the tuple // (resource owner, service, category) merge the directories in the base with // what has been reported in the collections we got. - for _, subtreePath := range snap.SubtreePaths { + for _, r := range snap.Reasons { + ictx := clues.Add( + ctx, + "subtree_service", r.Service().String(), + "subtree_category", r.Category().String()) + + subtreePath, err := r.SubtreePath() + if err != nil { + return clues.Wrap(err, "building subtree path").WithClues(ictx) + } + // We're starting from the root directory so don't need it in the path. pathElems := encodeElements(subtreePath.PopFront().Elements()...) - ictx := clues.Add(ctx, "subtree_path", subtreePath) ent, err := snapshotfs.GetNestedEntry(ictx, dir, pathElems) if err != nil { @@ -1022,7 +1057,7 @@ func inflateBaseTree( // This ensures that a migration on the directory prefix can complete. // The prefix is the tenant/service/owner/category set, which remains // otherwise unchecked in tree inflation below this point. - newSubtreePath := subtreePath + newSubtreePath := subtreePath.ToBuilder() if p, ok := updatedPaths[subtreePath.String()]; ok { newSubtreePath = p.ToBuilder() } @@ -1031,7 +1066,7 @@ func inflateBaseTree( ictx, 0, updatedPaths, - subtreePath.Dir(), + subtreePath.ToBuilder().Dir(), newSubtreePath.Dir(), subtreeDir, roots, @@ -1059,7 +1094,7 @@ func inflateBaseTree( func inflateDirTree( ctx context.Context, loader snapshotLoader, - baseSnaps []IncrementalBase, + baseSnaps []ManifestEntry, collections []data.BackupCollection, globalExcludeSet prefixmatcher.StringSetReader, progress *corsoProgress, diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index 0ac10ec6b..bbdbe9e6f 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -946,21 +946,22 @@ func (msw *mockSnapshotWalker) SnapshotRoot(*snapshot.Manifest) (fs.Entry, error return msw.snapshotRoot, nil } -func mockIncrementalBase( +func makeManifestEntry( id, tenant, resourceOwner string, service path.ServiceType, categories ...path.CategoryType, -) IncrementalBase { - stps := []*path.Builder{} +) ManifestEntry { + var reasons []Reasoner + for _, c := range categories { - stps = append(stps, path.Builder{}.Append(tenant, service.String(), resourceOwner, c.String())) + reasons = append(reasons, NewReason(tenant, resourceOwner, service, c)) } - return IncrementalBase{ + return ManifestEntry{ Manifest: &snapshot.Manifest{ ID: manifest.ID(id), }, - SubtreePaths: stps, + Reasons: reasons, } } @@ -1331,8 +1332,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { dirTree, err := inflateDirTree( ctx, msw, - []IncrementalBase{ - mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), + []ManifestEntry{ + makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, test.inputCollections(), pmMock.NewPrefixMap(nil), @@ -2260,8 +2261,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto dirTree, err := inflateDirTree( ctx, msw, - []IncrementalBase{ - mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), + []ManifestEntry{ + makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, test.inputCollections(t), ie, @@ -2425,8 +2426,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre dirTree, err := inflateDirTree( ctx, msw, - 
[]IncrementalBase{ - mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), + []ManifestEntry{ + makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, collections, pmMock.NewPrefixMap(nil), @@ -2531,8 +2532,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase() dirTree, err := inflateDirTree( ctx, msw, - []IncrementalBase{ - mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), + []ManifestEntry{ + makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, collections, pmMock.NewPrefixMap(nil), @@ -2782,9 +2783,9 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt dirTree, err := inflateDirTree( ctx, msw, - []IncrementalBase{ - mockIncrementalBase("id1", testTenant, testUser, path.ExchangeService, path.ContactsCategory), - mockIncrementalBase("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory), + []ManifestEntry{ + makeManifestEntry("id1", testTenant, testUser, path.ExchangeService, path.ContactsCategory), + makeManifestEntry("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory), }, collections, pmMock.NewPrefixMap(nil), @@ -2948,8 +2949,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt dirTree, err := inflateDirTree( ctx, msw, - []IncrementalBase{ - mockIncrementalBase("id1", testTenant, testUser, path.ExchangeService, path.EmailCategory, path.ContactsCategory), + []ManifestEntry{ + makeManifestEntry("id1", testTenant, testUser, path.ExchangeService, path.EmailCategory, path.ContactsCategory), }, []data.BackupCollection{mce, mcc}, pmMock.NewPrefixMap(nil), diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index f65827f76..3963b30f6 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -4,22 +4,19 @@ import ( "context" "errors" "strings" - "time" "github.com/alcionai/clues" "github.com/kopia/kopia/fs" "github.com/kopia/kopia/repo" - "github.com/kopia/kopia/repo/blob" - "github.com/kopia/kopia/repo/format" "github.com/kopia/kopia/repo/maintenance" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot/policy" "github.com/kopia/kopia/snapshot/snapshotfs" "github.com/kopia/kopia/snapshot/snapshotmaintenance" + "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/common/prefixmatcher" - "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/observe" @@ -132,11 +129,6 @@ func (w *Wrapper) Close(ctx context.Context) error { return nil } -type IncrementalBase struct { - *snapshot.Manifest - SubtreePaths []*path.Builder -} - // ConsumeBackupCollections takes a set of collections and creates a kopia snapshot // with the data that they contain. previousSnapshots is used for incremental // backups and should represent the base snapshot from which metadata is sourced @@ -145,10 +137,11 @@ type IncrementalBase struct { // complete backup of all data. 
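//
// A typical call now mirrors the updated tests (sketch; variable names
// are illustrative):
//
//	r := NewReason(tenantID, userID, path.ExchangeService, path.EmailCategory)
//	stats, deets, mergeInfo, err := w.ConsumeBackupCollections(
//		ctx, []Reasoner{r}, bases, collections, nil, nil, true, fault.New(true))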
func (w Wrapper) ConsumeBackupCollections( ctx context.Context, - previousSnapshots []IncrementalBase, + backupReasons []Reasoner, + bases BackupBases, collections []data.BackupCollection, globalExcludeSet prefixmatcher.StringSetReader, - tags map[string]string, + additionalTags map[string]string, buildTreeWithBase bool, errs *fault.Bus, ) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) { @@ -174,15 +167,23 @@ func (w Wrapper) ConsumeBackupCollections( // When running an incremental backup, we need to pass the prior // snapshot bases into inflateDirTree so that the new snapshot // includes historical data. - var base []IncrementalBase - if buildTreeWithBase { - base = previousSnapshots + var ( + mergeBase []ManifestEntry + assistBase []ManifestEntry + ) + + if bases != nil { + if buildTreeWithBase { + mergeBase = bases.MergeBases() + } + + assistBase = bases.AssistBases() } dirTree, err := inflateDirTree( ctx, w.c, - base, + mergeBase, collections, globalExcludeSet, progress) @@ -190,9 +191,22 @@ func (w Wrapper) ConsumeBackupCollections( return nil, nil, nil, clues.Wrap(err, "building kopia directories") } + // Add some extra tags so we can look things up by reason. + tags := maps.Clone(additionalTags) + if tags == nil { + // Some platforms seem to return nil if the input is nil. + tags = map[string]string{} + } + + for _, r := range backupReasons { + for _, k := range tagKeys(r) { + tags[k] = "" + } + } + s, err := w.makeSnapshotWithRoot( ctx, - previousSnapshots, + assistBase, dirTree, tags, progress) @@ -205,7 +219,7 @@ func (w Wrapper) ConsumeBackupCollections( func (w Wrapper) makeSnapshotWithRoot( ctx context.Context, - prevSnapEntries []IncrementalBase, + prevSnapEntries []ManifestEntry, root fs.Directory, addlTags map[string]string, progress *corsoProgress, @@ -225,8 +239,8 @@ func (w Wrapper) makeSnapshotWithRoot( ctx = clues.Add( ctx, - "len_prev_base_snapshots", len(prevSnapEntries), - "assist_snap_ids", snapIDs, + "num_assist_snapshots", len(prevSnapEntries), + "assist_snapshot_ids", snapIDs, "additional_tags", addlTags) if len(snapIDs) > 0 { @@ -722,202 +736,5 @@ func (w *Wrapper) SetRetentionParameters( ctx context.Context, retention repository.Retention, ) error { - if retention.Mode == nil && retention.Duration == nil && retention.Extend == nil { - return nil - } - - // Somewhat confusing case, when we have no retention but a non-zero duration - // it acts like we passed in only the duration and returns an error about - // having to set both. Return a clearer error here instead. Check if mode is - // set so we still allow changing duration if mode is already set. - if m, ok := ptr.ValOK(retention.Mode); ok && m == repository.NoRetention && ptr.Val(retention.Duration) != 0 { - return clues.New("duration must be 0 if retention is disabled").WithClues(ctx) - } - - dr, ok := w.c.Repository.(repo.DirectRepository) - if !ok { - return clues.New("getting handle to repo").WithClues(ctx) - } - - blobCfg, params, err := getRetentionConfigs(ctx, dr) - if err != nil { - return clues.Stack(err) - } - - // Update blob config information. - blobChanged, err := w.setBlobConfigParams(retention.Mode, retention.Duration, blobCfg) - if err != nil { - return clues.Wrap(err, "setting retention mode or duration").WithClues(ctx) - } - - // Update maintenance config information. 
- var maintenanceChanged bool - - if retention.Extend != nil && params.ExtendObjectLocks != *retention.Extend { - params.ExtendObjectLocks = *retention.Extend - maintenanceChanged = true - } - - // Check the new config is valid. - if blobCfg.IsRetentionEnabled() { - if err := maintenance.CheckExtendRetention(ctx, *blobCfg, params); err != nil { - return clues.Wrap(err, "invalid retention config").WithClues(ctx) - } - } - - return clues.Stack(persistRetentionConfigs( - ctx, - dr, - blobCfg, - blobChanged, - params, - maintenanceChanged, - )).OrNil() -} - -func getRetentionConfigs( - ctx context.Context, - dr repo.DirectRepository, -) (*format.BlobStorageConfiguration, *maintenance.Params, error) { - blobCfg, err := dr.FormatManager().BlobCfgBlob() - if err != nil { - return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx) - } - - params, err := maintenance.GetParams(ctx, dr) - if err != nil { - return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx) - } - - return &blobCfg, params, nil -} - -func persistRetentionConfigs( - ctx context.Context, - dr repo.DirectRepository, - blobCfg *format.BlobStorageConfiguration, - blobChanged bool, - params *maintenance.Params, - maintenanceChanged bool, -) error { - // Persist changes. - if !blobChanged && !maintenanceChanged { - return nil - } - - mp, err := dr.FormatManager().GetMutableParameters() - if err != nil { - return clues.Wrap(err, "getting mutable parameters") - } - - requiredFeatures, err := dr.FormatManager().RequiredFeatures() - if err != nil { - return clues.Wrap(err, "getting required features").WithClues(ctx) - } - - // Must be the case that only blob changed. - if !maintenanceChanged { - return clues.Wrap( - dr.FormatManager().SetParameters(ctx, mp, *blobCfg, requiredFeatures), - "persisting storage config", - ).WithClues(ctx).OrNil() - } - - // Both blob and maintenance changed. A DirectWriteSession is required to - // update the maintenance config but not the blob config. - err = repo.DirectWriteSession( - ctx, - dr, - repo.WriteSessionOptions{ - Purpose: "Corso immutable backups config", - }, - func(ctx context.Context, dw repo.DirectRepositoryWriter) error { - // Set the maintenance config first as we can bail out of the write - // session later. - if err := maintenance.SetParams(ctx, dw, params); err != nil { - return clues.Wrap(err, "maintenance config"). 
- WithClues(ctx) - } - - if !blobChanged { - return nil - } - - return clues.Wrap( - dr.FormatManager().SetParameters(ctx, mp, *blobCfg, requiredFeatures), - "storage config", - ).WithClues(ctx).OrNil() - }) - - return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil() -} - -func (w Wrapper) setBlobConfigParams( - mode *repository.RetentionMode, - duration *time.Duration, - blobCfg *format.BlobStorageConfiguration, -) (bool, error) { - changed, err := setBlobConfigMode(mode, blobCfg) - if err != nil { - return false, clues.Stack(err) - } - - tmp := setBlobConfigDuration(duration, blobCfg) - changed = changed || tmp - - return changed, nil -} - -func setBlobConfigDuration( - duration *time.Duration, - blobCfg *format.BlobStorageConfiguration, -) bool { - var changed bool - - if duration != nil && blobCfg.RetentionPeriod != *duration { - blobCfg.RetentionPeriod = *duration - changed = true - } - - return changed -} - -func setBlobConfigMode( - mode *repository.RetentionMode, - blobCfg *format.BlobStorageConfiguration, -) (bool, error) { - if mode == nil { - return false, nil - } - - startMode := blobCfg.RetentionMode - - switch *mode { - case repository.NoRetention: - if !blobCfg.IsRetentionEnabled() { - return false, nil - } - - blobCfg.RetentionMode = "" - blobCfg.RetentionPeriod = 0 - - case repository.GovernanceRetention: - blobCfg.RetentionMode = blob.Governance - - case repository.ComplianceRetention: - blobCfg.RetentionMode = blob.Compliance - - default: - return false, clues.New("unknown retention mode"). - With("provided_retention_mode", mode.String()) - } - - // Only check if the retention mode is not empty. IsValid errors out if it's - // empty. - if len(blobCfg.RetentionMode) > 0 && !blobCfg.RetentionMode.IsValid() { - return false, clues.New("invalid retention mode"). - With("retention_mode", blobCfg.RetentionMode) - } - - return startMode != blobCfg.RetentionMode, nil + return clues.Stack(w.c.setRetentionParameters(ctx, retention)).OrNil() } diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 12857904f..5014e07c1 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -696,6 +696,24 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() { 42), } + c1 := exchMock.NewCollection( + suite.storePath1, + suite.locPath1, + 0) + c1.ColState = data.NotMovedState + c1.PrevPath = suite.storePath1 + + c2 := exchMock.NewCollection( + suite.storePath2, + suite.locPath2, + 0) + c2.ColState = data.NotMovedState + c2.PrevPath = suite.storePath2 + + // Make empty collections at the same locations to force a backup with no + // changes. Needed to ensure we force a backup even if nothing has changed. + emptyCollections := []data.BackupCollection{c1, c2} + // tags that are supplied by the caller. This includes basic tags to support // lookups and extra tags the caller may want to apply. 
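 // Reason-derived tag keys are no longer seeded here by hand; the wrapper
 // adds them inside ConsumeBackupCollections, so the test mirrors them
 // into expectedTags below instead.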
tags := map[string]string{ @@ -703,108 +721,246 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() { "brunhilda": "", } - reasons := []Reason{ - { - ResourceOwner: suite.storePath1.ResourceOwner(), - Service: suite.storePath1.Service(), - Category: suite.storePath1.Category(), - }, - { - ResourceOwner: suite.storePath2.ResourceOwner(), - Service: suite.storePath2.Service(), - Category: suite.storePath2.Category(), - }, - } - - for _, r := range reasons { - for _, k := range r.TagKeys() { - tags[k] = "" - } + reasons := []Reasoner{ + NewReason( + testTenant, + suite.storePath1.ResourceOwner(), + suite.storePath1.Service(), + suite.storePath1.Category(), + ), + NewReason( + testTenant, + suite.storePath2.ResourceOwner(), + suite.storePath2.Service(), + suite.storePath2.Category(), + ), } expectedTags := map[string]string{} - maps.Copy(expectedTags, normalizeTagKVs(tags)) + maps.Copy(expectedTags, tags) - table := []struct { - name string - expectedUploadedFiles int - expectedCachedFiles int - // Whether entries in the resulting details should be marked as updated. - deetsUpdated bool - }{ - { - name: "Uncached", - expectedUploadedFiles: 47, - expectedCachedFiles: 0, - deetsUpdated: true, - }, - { - name: "Cached", - expectedUploadedFiles: 0, - expectedCachedFiles: 47, - deetsUpdated: false, - }, + for _, r := range reasons { + for _, k := range tagKeys(r) { + expectedTags[k] = "" + } } - prevSnaps := []IncrementalBase{} + expectedTags = normalizeTagKVs(expectedTags) + + type testCase struct { + name string + baseBackups func(base ManifestEntry) BackupBases + collections []data.BackupCollection + expectedUploadedFiles int + expectedCachedFiles int + // We're either going to get details entries or entries in the details + // merger. Details is populated when there's entries in the collection. The + // details merger is populated for cached entries. The details merger + // doesn't count folders, only items. + // + // Setting this to true looks for details merger entries. Setting it to + // false looks for details entries. + expectMerge bool + // Whether entries in the resulting details should be marked as updated. + deetsUpdated assert.BoolAssertionFunc + hashedBytesCheck assert.ValueAssertionFunc + // Range of bytes (inclusive) to expect as uploaded. A little fragile, but + // allows us to differentiate between content that wasn't uploaded due to + // being cached/deduped/skipped due to existing dir entries and stuff that + // was actually pushed to S3. + uploadedBytes []int64 + } + + // Initial backup. All files should be considered new by kopia. 
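+ // The snapshot produced here is fed back in as the merge and/or assist
+ // base for the cached cases in the table further down.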
+ baseBackupCase := testCase{ + name: "Uncached", + baseBackups: func(ManifestEntry) BackupBases { + return NewMockBackupBases() + }, + collections: collections, + expectedUploadedFiles: 47, + expectedCachedFiles: 0, + deetsUpdated: assert.True, + hashedBytesCheck: assert.NotZero, + uploadedBytes: []int64{8000, 10000}, + } + + runAndTestBackup := func(test testCase, base ManifestEntry) ManifestEntry { + var res ManifestEntry - for _, test := range table { suite.Run(test.name, func() { t := suite.T() - stats, deets, _, err := suite.w.ConsumeBackupCollections( - suite.ctx, - prevSnaps, - collections, + ctx, flush := tester.NewContext(t) + defer flush() + + bbs := test.baseBackups(base) + + stats, deets, deetsMerger, err := suite.w.ConsumeBackupCollections( + ctx, + reasons, + bbs, + test.collections, nil, tags, true, fault.New(true)) - assert.NoError(t, err, clues.ToCore(err)) + require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files") assert.Equal(t, test.expectedUploadedFiles, stats.UncachedFileCount, "uncached files") assert.Equal(t, test.expectedCachedFiles, stats.CachedFileCount, "cached files") - assert.Equal(t, 6, stats.TotalDirectoryCount) + assert.Equal(t, 4+len(test.collections), stats.TotalDirectoryCount, "directory count") assert.Equal(t, 0, stats.IgnoredErrorCount) assert.Equal(t, 0, stats.ErrorCount) assert.False(t, stats.Incomplete) - - // 47 file and 2 folder entries. - details := deets.Details().Entries - assert.Len( + test.hashedBytesCheck(t, stats.TotalHashedBytes, "hashed bytes") + assert.LessOrEqual( t, - details, - test.expectedUploadedFiles+test.expectedCachedFiles+2, - ) + test.uploadedBytes[0], + stats.TotalUploadedBytes, + "low end of uploaded bytes") + assert.GreaterOrEqual( + t, + test.uploadedBytes[1], + stats.TotalUploadedBytes, + "high end of uploaded bytes") - for _, entry := range details { - assert.Equal(t, test.deetsUpdated, entry.Updated) + if test.expectMerge { + assert.Empty(t, deets.Details().Entries, "details entries") + assert.Equal( + t, + test.expectedUploadedFiles+test.expectedCachedFiles, + deetsMerger.ItemsToMerge(), + "details merger entries") + } else { + assert.Zero(t, deetsMerger.ItemsToMerge(), "details merger entries") + + details := deets.Details().Entries + assert.Len( + t, + details, + // 47 file and 2 folder entries. 
+ test.expectedUploadedFiles+test.expectedCachedFiles+2, + ) + + for _, entry := range details { + test.deetsUpdated(t, entry.Updated) + } } checkSnapshotTags( t, - suite.ctx, + ctx, suite.w.c, expectedTags, stats.SnapshotID, ) snap, err := snapshot.LoadSnapshot( - suite.ctx, + ctx, suite.w.c, manifest.ID(stats.SnapshotID), ) require.NoError(t, err, clues.ToCore(err)) - prevSnaps = append(prevSnaps, IncrementalBase{ + res = ManifestEntry{ Manifest: snap, - SubtreePaths: []*path.Builder{ - suite.storePath1.ToBuilder().Dir(), - }, - }) + Reasons: reasons, + } }) + + return res + } + + base := runAndTestBackup(baseBackupCase, ManifestEntry{}) + + table := []testCase{ + { + name: "Kopia Assist And Merge All Files Changed", + baseBackups: func(base ManifestEntry) BackupBases { + return NewMockBackupBases().WithMergeBases(base) + }, + collections: collections, + expectedUploadedFiles: 0, + expectedCachedFiles: 47, + deetsUpdated: assert.False, + hashedBytesCheck: assert.Zero, + uploadedBytes: []int64{4000, 6000}, + }, + { + name: "Kopia Assist And Merge No Files Changed", + baseBackups: func(base ManifestEntry) BackupBases { + return NewMockBackupBases().WithMergeBases(base) + }, + // Pass in empty collections to force a backup. Otherwise we'll skip + // actually trying to do anything because we'll see there's nothing that + // changed. The real goal is to get it to deal with the merged collections + // again though. + collections: emptyCollections, + // Should hit cached check prior to dir entry check so we see them as + // cached. + expectedUploadedFiles: 0, + expectedCachedFiles: 47, + // Entries go into the details merger because we never materialize details + // info for the items since they're from the base. + expectMerge: true, + // Not used since there's no details entries. + deetsUpdated: assert.False, + hashedBytesCheck: assert.Zero, + uploadedBytes: []int64{4000, 6000}, + }, + { + name: "Kopia Assist Only", + baseBackups: func(base ManifestEntry) BackupBases { + return NewMockBackupBases().WithAssistBases(base) + }, + collections: collections, + expectedUploadedFiles: 0, + expectedCachedFiles: 47, + deetsUpdated: assert.False, + hashedBytesCheck: assert.Zero, + uploadedBytes: []int64{4000, 6000}, + }, + { + name: "Merge Only", + baseBackups: func(base ManifestEntry) BackupBases { + return NewMockBackupBases().WithMergeBases(base).ClearMockAssistBases() + }, + // Pass in empty collections to force a backup. Otherwise we'll skip + // actually trying to do anything because we'll see there's nothing that + // changed. The real goal is to get it to deal with the merged collections + // again though. + collections: emptyCollections, + expectedUploadedFiles: 47, + expectedCachedFiles: 0, + expectMerge: true, + // Not used since there's no details entries. + deetsUpdated: assert.False, + // Kopia still counts these bytes as "hashed" even though it shouldn't + // read the file data since they already have dir entries it can reuse. + hashedBytesCheck: assert.NotZero, + uploadedBytes: []int64{4000, 6000}, + }, + { + name: "Content Hash Only", + baseBackups: func(base ManifestEntry) BackupBases { + return NewMockBackupBases() + }, + collections: collections, + expectedUploadedFiles: 47, + expectedCachedFiles: 0, + // Marked as updated because we still fall into the uploadFile handler in + // kopia instead of the cachedFile handler. 
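+ // (uploadFile re-hashes the content, which is why hashedBytesCheck
+ // expects a non-zero value for this case.)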
+ deetsUpdated: assert.True, + hashedBytesCheck: assert.NotZero, + uploadedBytes: []int64{4000, 6000}, + }, + } + + for _, test := range table { + runAndTestBackup(test, base) } } @@ -837,23 +993,25 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { "brunhilda": "", } - reasons := []Reason{ - { - ResourceOwner: storePath.ResourceOwner(), - Service: storePath.Service(), - Category: storePath.Category(), - }, - } - - for _, r := range reasons { - for _, k := range r.TagKeys() { - tags[k] = "" - } + reasons := []Reasoner{ + NewReason( + testTenant, + storePath.ResourceOwner(), + storePath.Service(), + storePath.Category()), } expectedTags := map[string]string{} - maps.Copy(expectedTags, normalizeTagKVs(tags)) + maps.Copy(expectedTags, tags) + + for _, r := range reasons { + for _, k := range tagKeys(r) { + expectedTags[k] = "" + } + } + + expectedTags = normalizeTagKVs(expectedTags) table := []struct { name string @@ -931,7 +1089,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { }, } - prevSnaps := []IncrementalBase{} + prevSnaps := NewMockBackupBases() for _, test := range table { suite.Run(test.name, func() { @@ -940,6 +1098,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { stats, deets, prevShortRefs, err := suite.w.ConsumeBackupCollections( suite.ctx, + reasons, prevSnaps, collections, nil, @@ -992,12 +1151,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { manifest.ID(stats.SnapshotID)) require.NoError(t, err, clues.ToCore(err)) - prevSnaps = append(prevSnaps, IncrementalBase{ - Manifest: snap, - SubtreePaths: []*path.Builder{ - storePath.ToBuilder().Dir(), + prevSnaps.WithMergeBases( + ManifestEntry{ + Manifest: snap, + Reasons: reasons, }, - }) + ) }) } } @@ -1016,16 +1175,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { w := &Wrapper{k} - tags := map[string]string{} - reason := Reason{ - ResourceOwner: testUser, - Service: path.ExchangeService, - Category: path.EmailCategory, - } - - for _, k := range reason.TagKeys() { - tags[k] = "" - } + r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) dc1 := exchMock.NewCollection(suite.storePath1, suite.locPath1, 1) dc2 := exchMock.NewCollection(suite.storePath2, suite.locPath2, 1) @@ -1038,10 +1188,11 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { stats, _, _, err := w.ConsumeBackupCollections( ctx, + []Reasoner{r}, nil, []data.BackupCollection{dc1, dc2}, nil, - tags, + nil, true, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -1112,16 +1263,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { loc1 := path.Builder{}.Append(suite.storePath1.Folders()...) loc2 := path.Builder{}.Append(suite.storePath2.Folders()...) 
- tags := map[string]string{} - reason := Reason{ - ResourceOwner: testUser, - Service: path.ExchangeService, - Category: path.EmailCategory, - } - - for _, k := range reason.TagKeys() { - tags[k] = "" - } + r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) collections := []data.BackupCollection{ &mockBackupCollection{ @@ -1164,10 +1306,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { stats, deets, _, err := suite.w.ConsumeBackupCollections( suite.ctx, + []Reasoner{r}, nil, collections, nil, - tags, + nil, true, fault.New(true)) require.Error(t, err, clues.ToCore(err)) @@ -1239,6 +1382,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() s, d, _, err := suite.w.ConsumeBackupCollections( ctx, nil, + nil, test.collections, nil, nil, @@ -1391,23 +1535,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { collections = append(collections, collection) } - tags := map[string]string{} - reason := Reason{ - ResourceOwner: testUser, - Service: path.ExchangeService, - Category: path.EmailCategory, - } - - for _, k := range reason.TagKeys() { - tags[k] = "" - } + r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) stats, deets, _, err := suite.w.ConsumeBackupCollections( suite.ctx, + []Reasoner{r}, nil, collections, nil, - tags, + nil, false, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -1437,32 +1573,11 @@ func (c *i64counter) Count(i int64) { } func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { - reason := Reason{ - ResourceOwner: testUser, - Service: path.ExchangeService, - Category: path.EmailCategory, - } - - subtreePathTmp, err := path.Build( - testTenant, - testUser, - path.ExchangeService, - path.EmailCategory, - false, - "tmp") - require.NoError(suite.T(), err, clues.ToCore(err)) - - subtreePath := subtreePathTmp.ToBuilder().Dir() + r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) man, err := suite.w.c.LoadSnapshot(suite.ctx, suite.snapshotID) require.NoError(suite.T(), err, "getting base snapshot: %v", clues.ToCore(err)) - tags := map[string]string{} - - for _, k := range reason.TagKeys() { - tags[k] = "" - } - table := []struct { name string excludeItem bool @@ -1551,17 +1666,16 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { stats, _, _, err := suite.w.ConsumeBackupCollections( suite.ctx, - []IncrementalBase{ - { + []Reasoner{r}, + NewMockBackupBases().WithMergeBases( + ManifestEntry{ Manifest: man, - SubtreePaths: []*path.Builder{ - subtreePath, - }, + Reasons: []Reasoner{r}, }, - }, + ), test.cols(), excluded, - tags, + nil, true, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) diff --git a/src/internal/m365/backup_test.go b/src/internal/m365/backup_test.go index bb59741f8..b80bd4ddc 100644 --- a/src/internal/m365/backup_test.go +++ b/src/internal/m365/backup_test.go @@ -57,7 +57,7 @@ func (suite *DataCollectionIntgSuite) SetupSuite() { suite.tenantID = creds.AzureTenantID - suite.ac, err = api.NewClient(creds) + suite.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) } diff --git a/src/internal/m365/controller.go b/src/internal/m365/controller.go index 9b037350b..4dd2c19e8 100644 --- a/src/internal/m365/controller.go +++ b/src/internal/m365/controller.go @@ -69,7 +69,7 @@ func NewController( return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx) } - ac, err := 
api.NewClient(creds) + ac, err := api.NewClient(creds, co) if err != nil { return nil, clues.Wrap(err, "creating api client").WithClues(ctx) } diff --git a/src/internal/m365/controller_test.go b/src/internal/m365/controller_test.go index ef729493b..487603b39 100644 --- a/src/internal/m365/controller_test.go +++ b/src/internal/m365/controller_test.go @@ -692,6 +692,7 @@ func runRestoreBackupTestVersions( tenant string, resourceOwners []string, opts control.Options, + crc control.RestoreConfig, ) { ctx, flush := tester.NewContext(t) defer flush() @@ -702,7 +703,7 @@ func runRestoreBackupTestVersions( Service: test.service, Tenant: tenant, ResourceOwners: resourceOwners, - RestoreCfg: testdata.DefaultRestoreConfig(""), + RestoreCfg: crc, } totalItems, _, collections, _, err := stub.GetCollectionsAndExpected( diff --git a/src/internal/m365/exchange/backup_test.go b/src/internal/m365/exchange/backup_test.go index 8ac8c14dd..34735eda8 100644 --- a/src/internal/m365/exchange/backup_test.go +++ b/src/internal/m365/exchange/backup_test.go @@ -414,7 +414,7 @@ func (suite *BackupIntgSuite) SetupSuite() { creds, err := acct.M365Config() require.NoError(t, err, clues.ToCore(err)) - suite.ac, err = api.NewClient(creds) + suite.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) suite.tenantID = creds.AzureTenantID diff --git a/src/internal/m365/exchange/container_resolver_test.go b/src/internal/m365/exchange/container_resolver_test.go index 8b5fa7c95..54cd23c67 100644 --- a/src/internal/m365/exchange/container_resolver_test.go +++ b/src/internal/m365/exchange/container_resolver_test.go @@ -17,6 +17,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" @@ -698,7 +699,7 @@ func (suite *ContainerResolverSuite) SetupSuite() { } func (suite *ContainerResolverSuite) TestPopulate() { - ac, err := api.NewClient(suite.credentials) + ac, err := api.NewClient(suite.credentials, control.Defaults()) require.NoError(suite.T(), err, clues.ToCore(err)) eventFunc := func(t *testing.T) graph.ContainerResolver { diff --git a/src/internal/m365/exchange/helper_test.go b/src/internal/m365/exchange/helper_test.go index 7e604c466..f8cadd227 100644 --- a/src/internal/m365/exchange/helper_test.go +++ b/src/internal/m365/exchange/helper_test.go @@ -9,6 +9,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -30,7 +31,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup { its.creds = creds - its.ac, err = api.NewClient(creds) + its.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) its.userID = tconfig.GetM365UserID(ctx) diff --git a/src/internal/m365/exchange/mail_container_cache_test.go b/src/internal/m365/exchange/mail_container_cache_test.go index b95a9a170..64f453092 100644 --- a/src/internal/m365/exchange/mail_container_cache_test.go +++ b/src/internal/m365/exchange/mail_container_cache_test.go @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" 
"github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -83,7 +84,7 @@ func (suite *MailFolderCacheIntegrationSuite) TestDeltaFetch() { ctx, flush := tester.NewContext(t) defer flush() - ac, err := api.NewClient(suite.credentials) + ac, err := api.NewClient(suite.credentials, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) acm := ac.Mail() diff --git a/src/internal/m365/exchange/restore_test.go b/src/internal/m365/exchange/restore_test.go index 4d91329e9..42e61a915 100644 --- a/src/internal/m365/exchange/restore_test.go +++ b/src/internal/m365/exchange/restore_test.go @@ -44,7 +44,7 @@ func (suite *RestoreIntgSuite) SetupSuite() { require.NoError(t, err, clues.ToCore(err)) suite.credentials = m365 - suite.ac, err = api.NewClient(m365) + suite.ac, err = api.NewClient(m365, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) } diff --git a/src/internal/m365/graph/http_wrapper_test.go b/src/internal/m365/graph/http_wrapper_test.go index 594eb75cd..19711edc4 100644 --- a/src/internal/m365/graph/http_wrapper_test.go +++ b/src/internal/m365/graph/http_wrapper_test.go @@ -93,7 +93,7 @@ func (suite *HTTPWrapperUnitSuite) TestNewHTTPWrapper_redirectMiddleware() { hdr.Set("Location", "localhost:99999999/smarfs") toResp := &http.Response{ - StatusCode: 302, + StatusCode: http.StatusFound, Header: hdr, } diff --git a/src/internal/m365/onedrive/item_collector_test.go b/src/internal/m365/onedrive/item_collector_test.go index ec2ab26af..fc2cccd62 100644 --- a/src/internal/m365/onedrive/item_collector_test.go +++ b/src/internal/m365/onedrive/item_collector_test.go @@ -313,7 +313,7 @@ func (suite *OneDriveIntgSuite) SetupSuite() { suite.creds = creds - suite.ac, err = api.NewClient(creds) + suite.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) } diff --git a/src/internal/m365/onedrive/service_test.go b/src/internal/m365/onedrive/service_test.go index 4569acffc..a39a65a76 100644 --- a/src/internal/m365/onedrive/service_test.go +++ b/src/internal/m365/onedrive/service_test.go @@ -9,6 +9,7 @@ import ( "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -20,7 +21,7 @@ type oneDriveService struct { } func NewOneDriveService(credentials account.M365Config) (*oneDriveService, error) { - ac, err := api.NewClient(credentials) + ac, err := api.NewClient(credentials, control.Defaults()) if err != nil { return nil, err } diff --git a/src/internal/m365/onedrive/url_cache_test.go b/src/internal/m365/onedrive/url_cache_test.go index 8adcf36cc..7946da840 100644 --- a/src/internal/m365/onedrive/url_cache_test.go +++ b/src/internal/m365/onedrive/url_cache_test.go @@ -53,7 +53,7 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() { creds, err := acct.M365Config() require.NoError(t, err, clues.ToCore(err)) - suite.ac, err = api.NewClient(creds) + suite.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) drive, err := suite.ac.Users().GetDefaultDrive(ctx, suite.user) diff --git a/src/internal/m365/onedrive_test.go b/src/internal/m365/onedrive_test.go index 3fbd5f531..eade30c9d 100644 --- a/src/internal/m365/onedrive_test.go +++ b/src/internal/m365/onedrive_test.go @@ -22,6 
+22,7 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -516,6 +517,9 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions( collectionsLatest: expected, } + rc := testdata.DefaultRestoreConfig("od_restore_and_backup_multi") + rc.OnCollision = control.Replace + runRestoreBackupTestVersions( t, testData, @@ -524,7 +528,8 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions( control.Options{ RestorePermissions: true, ToggleFeatures: control.Toggles{}, - }) + }, + rc) }) } } @@ -763,6 +768,9 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) { collectionsLatest: expected, } + rc := testdata.DefaultRestoreConfig("perms_restore_and_backup") + rc.OnCollision = control.Replace + runRestoreBackupTestVersions( t, testData, @@ -771,7 +779,8 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) { control.Options{ RestorePermissions: true, ToggleFeatures: control.Toggles{}, - }) + }, + rc) }) } } @@ -851,6 +860,9 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) { collectionsLatest: expected, } + rc := testdata.DefaultRestoreConfig("perms_backup_no_restore") + rc.OnCollision = control.Replace + runRestoreBackupTestVersions( t, testData, @@ -859,7 +871,8 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) { control.Options{ RestorePermissions: false, ToggleFeatures: control.Toggles{}, - }) + }, + rc) }) } } @@ -1054,6 +1067,9 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio collectionsLatest: expected, } + rc := testdata.DefaultRestoreConfig("perms_inherit_restore_and_backup") + rc.OnCollision = control.Replace + runRestoreBackupTestVersions( t, testData, @@ -1062,7 +1078,8 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio control.Options{ RestorePermissions: true, ToggleFeatures: control.Toggles{}, - }) + }, + rc) }) } } @@ -1247,6 +1264,9 @@ func testLinkSharesInheritanceRestoreAndBackup(suite oneDriveSuite, startVersion collectionsLatest: expected, } + rc := testdata.DefaultRestoreConfig("linkshares_inherit_restore_and_backup") + rc.OnCollision = control.Replace + runRestoreBackupTestVersions( t, testData, @@ -1255,7 +1275,8 @@ func testLinkSharesInheritanceRestoreAndBackup(suite oneDriveSuite, startVersion control.Options{ RestorePermissions: true, ToggleFeatures: control.Toggles{}, - }) + }, + rc) }) } } diff --git a/src/internal/m365/restore.go b/src/internal/m365/restore.go index 31e36e2bb..5d58fdb26 100644 --- a/src/internal/m365/restore.go +++ b/src/internal/m365/restore.go @@ -36,7 +36,7 @@ func (ctrl *Controller) ConsumeRestoreCollections( defer end() ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()}) - ctx = clues.Add(ctx, "restore_config", restoreCfg) // TODO(rkeepers): needs PII control + ctx = clues.Add(ctx, "restore_config", restoreCfg) if len(dcs) == 0 { return nil, clues.New("no data collections to restore") diff --git a/src/internal/m365/sharepoint/backup_test.go b/src/internal/m365/sharepoint/backup_test.go index 6e878f0b9..973a55670 100644 --- a/src/internal/m365/sharepoint/backup_test.go +++ b/src/internal/m365/sharepoint/backup_test.go @@ -201,7 +201,7 @@ func (suite 
*SharePointPagesSuite) TestCollectPages() { creds, err := a.M365Config() require.NoError(t, err, clues.ToCore(err)) - ac, err := api.NewClient(creds) + ac, err := api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) col, err := collectPages( diff --git a/src/internal/m365/sharepoint/collection_test.go b/src/internal/m365/sharepoint/collection_test.go index babe6114e..42f9ad9a1 100644 --- a/src/internal/m365/sharepoint/collection_test.go +++ b/src/internal/m365/sharepoint/collection_test.go @@ -43,7 +43,7 @@ func (suite *SharePointCollectionSuite) SetupSuite() { suite.creds = m365 - ac, err := api.NewClient(m365) + ac, err := api.NewClient(m365, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) suite.ac = ac diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index 00eb82884..82ae79fb6 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -6,7 +6,6 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" - "github.com/kopia/kopia/repo/manifest" "github.com/alcionai/corso/src/internal/common/crash" "github.com/alcionai/corso/src/internal/common/dttm" @@ -280,8 +279,8 @@ func (op *BackupOperation) do( backupID model.StableID, ) (*details.Builder, error) { var ( - reasons = selectorToReasons(op.Selectors, false) - fallbackReasons = makeFallbackReasons(op.Selectors) + reasons = selectorToReasons(op.account.ID(), op.Selectors, false) + fallbackReasons = makeFallbackReasons(op.account.ID(), op.Selectors) lastBackupVersion = version.NoBackup ) @@ -370,10 +369,10 @@ func (op *BackupOperation) do( return deets, nil } -func makeFallbackReasons(sel selectors.Selector) []kopia.Reason { +func makeFallbackReasons(tenant string, sel selectors.Selector) []kopia.Reasoner { if sel.PathService() != path.SharePointService && sel.DiscreteOwner != sel.DiscreteOwnerName { - return selectorToReasons(sel, true) + return selectorToReasons(tenant, sel, true) } return nil @@ -420,9 +419,13 @@ func produceBackupDataCollections( // Consumer funcs // --------------------------------------------------------------------------- -func selectorToReasons(sel selectors.Selector, useOwnerNameForID bool) []kopia.Reason { +func selectorToReasons( + tenant string, + sel selectors.Selector, + useOwnerNameForID bool, +) []kopia.Reasoner { service := sel.PathService() - reasons := []kopia.Reason{} + reasons := []kopia.Reasoner{} pcs, err := sel.PathCategories() if err != nil { @@ -438,43 +441,19 @@ func selectorToReasons(sel selectors.Selector, useOwnerNameForID bool) []kopia.R for _, sl := range [][]path.CategoryType{pcs.Includes, pcs.Filters} { for _, cat := range sl { - reasons = append(reasons, kopia.Reason{ - ResourceOwner: owner, - Service: service, - Category: cat, - }) + reasons = append(reasons, kopia.NewReason(tenant, owner, service, cat)) } } return reasons } -func builderFromReason(ctx context.Context, tenant string, r kopia.Reason) (*path.Builder, error) { - ctx = clues.Add(ctx, "category", r.Category.String()) - - // This is hacky, but we want the path package to format the path the right - // way (e.x. proper order for service, category, etc), but we don't care about - // the folders after the prefix. 
- p, err := path.Build( - tenant, - r.ResourceOwner, - r.Service, - r.Category, - false, - "tmp") - if err != nil { - return nil, clues.Wrap(err, "building path").WithClues(ctx) - } - - return p.ToBuilder().Dir(), nil -} - // calls kopia to backup the collections of data func consumeBackupCollections( ctx context.Context, bc kinject.BackupConsumer, tenantID string, - reasons []kopia.Reason, + reasons []kopia.Reasoner, bbs kopia.BackupBases, cs []data.BackupCollection, pmr prefixmatcher.StringSetReader, @@ -495,90 +474,10 @@ func consumeBackupCollections( kopia.TagBackupCategory: "", } - for _, reason := range reasons { - for _, k := range reason.TagKeys() { - tags[k] = "" - } - } - - // AssistBases should be the upper bound for how many snapshots we pass in. - bases := make([]kopia.IncrementalBase, 0, len(bbs.AssistBases())) - // Track IDs we've seen already so we don't accidentally duplicate some - // manifests. This can be removed when we move the code below into the kopia - // package. - ids := map[manifest.ID]struct{}{} - - var mb []kopia.ManifestEntry - - if bbs != nil { - mb = bbs.MergeBases() - } - - // TODO(ashmrtn): Make a wrapper for Reson that allows adding a tenant and - // make a function that will spit out a prefix that includes the tenant. With - // that done this code can be moved to kopia wrapper since it's really more - // specific to that. - for _, m := range mb { - paths := make([]*path.Builder, 0, len(m.Reasons)) - services := map[string]struct{}{} - categories := map[string]struct{}{} - - for _, reason := range m.Reasons { - pb, err := builderFromReason(ctx, tenantID, reason) - if err != nil { - return nil, nil, nil, clues.Wrap(err, "getting subtree paths for bases") - } - - paths = append(paths, pb) - services[reason.Service.String()] = struct{}{} - categories[reason.Category.String()] = struct{}{} - } - - ids[m.ID] = struct{}{} - - bases = append(bases, kopia.IncrementalBase{ - Manifest: m.Manifest, - SubtreePaths: paths, - }) - - svcs := make([]string, 0, len(services)) - for k := range services { - svcs = append(svcs, k) - } - - cats := make([]string, 0, len(categories)) - for k := range categories { - cats = append(cats, k) - } - - mbID, ok := m.GetTag(kopia.TagBackupID) - if !ok { - mbID = "no_backup_id_tag" - } - - logger.Ctx(ctx).Infow( - "using base for backup", - "base_snapshot_id", m.ID, - "services", svcs, - "categories", cats, - "base_backup_id", mbID) - } - - // At the moment kopia assisted snapshots are in the same set as merge bases. - // When we fixup generating subtree paths we can remove this. 
- if bbs != nil { - for _, ab := range bbs.AssistBases() { - if _, ok := ids[ab.ID]; ok { - continue - } - - bases = append(bases, kopia.IncrementalBase{Manifest: ab.Manifest}) - } - } - kopiaStats, deets, itemsSourcedFromBase, err := bc.ConsumeBackupCollections( ctx, - bases, + reasons, + bbs, cs, pmr, tags, @@ -586,7 +485,7 @@ func consumeBackupCollections( errs) if err != nil { if kopiaStats == nil { - return nil, nil, nil, err + return nil, nil, nil, clues.Stack(err) } return nil, nil, nil, clues.Stack(err).With( @@ -609,11 +508,11 @@ func consumeBackupCollections( return kopiaStats, deets, itemsSourcedFromBase, err } -func matchesReason(reasons []kopia.Reason, p path.Path) bool { +func matchesReason(reasons []kopia.Reasoner, p path.Path) bool { for _, reason := range reasons { - if p.ResourceOwner() == reason.ResourceOwner && - p.Service() == reason.Service && - p.Category() == reason.Category { + if p.ResourceOwner() == reason.ProtectedResource() && + p.Service() == reason.Service() && + p.Category() == reason.Category() { return true } } diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index ffa164c81..3aaeae45c 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -107,7 +107,8 @@ func checkPaths(t *testing.T, expected, got []path.Path) { type mockBackupConsumer struct { checkFunc func( - bases []kopia.IncrementalBase, + backupReasons []kopia.Reasoner, + bases kopia.BackupBases, cs []data.BackupCollection, tags map[string]string, buildTreeWithBase bool) @@ -115,7 +116,8 @@ type mockBackupConsumer struct { func (mbu mockBackupConsumer) ConsumeBackupCollections( ctx context.Context, - bases []kopia.IncrementalBase, + backupReasons []kopia.Reasoner, + bases kopia.BackupBases, cs []data.BackupCollection, excluded prefixmatcher.StringSetReader, tags map[string]string, @@ -123,7 +125,7 @@ func (mbu mockBackupConsumer) ConsumeBackupCollections( errs *fault.Bus, ) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error) { if mbu.checkFunc != nil { - mbu.checkFunc(bases, cs, tags, buildTreeWithBase) + mbu.checkFunc(backupReasons, bases, cs, tags, buildTreeWithBase) } return &kopia.BackupStats{}, &details.Builder{}, nil, nil @@ -388,31 +390,25 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_PersistResults() { func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections_Paths() { var ( + t = suite.T() + tenant = "a-tenant" resourceOwner = "a-user" - emailBuilder = path.Builder{}.Append( + emailReason = kopia.NewReason( tenant, - path.ExchangeService.String(), resourceOwner, - path.EmailCategory.String(), - ) - contactsBuilder = path.Builder{}.Append( + path.ExchangeService, + path.EmailCategory) + contactsReason = kopia.NewReason( tenant, - path.ExchangeService.String(), resourceOwner, - path.ContactsCategory.String(), - ) + path.ExchangeService, + path.ContactsCategory) - emailReason = kopia.Reason{ - ResourceOwner: resourceOwner, - Service: path.ExchangeService, - Category: path.EmailCategory, - } - contactsReason = kopia.Reason{ - ResourceOwner: resourceOwner, - Service: path.ExchangeService, - Category: path.ContactsCategory, + reasons = []kopia.Reasoner{ + emailReason, + contactsReason, } manifest1 = &snapshot.Manifest{ @@ -421,147 +417,57 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections manifest2 = &snapshot.Manifest{ ID: "id2", } + + bases = kopia.NewMockBackupBases().WithMergeBases( + kopia.ManifestEntry{ + Manifest: 
manifest1, + Reasons: []kopia.Reasoner{ + emailReason, + }, + }).WithAssistBases( + kopia.ManifestEntry{ + Manifest: manifest2, + Reasons: []kopia.Reasoner{ + contactsReason, + }, + }) + + backupID = model.StableID("foo") + expectedTags = map[string]string{ + kopia.TagBackupID: string(backupID), + kopia.TagBackupCategory: "", + } ) - table := []struct { - name string - // Backup model is untouched in this test so there's no need to populate it. - input kopia.BackupBases - expected []kopia.IncrementalBase - }{ - { - name: "SingleManifestSingleReason", - input: kopia.NewMockBackupBases().WithMergeBases( - kopia.ManifestEntry{ - Manifest: manifest1, - Reasons: []kopia.Reason{ - emailReason, - }, - }).ClearMockAssistBases(), - expected: []kopia.IncrementalBase{ - { - Manifest: manifest1, - SubtreePaths: []*path.Builder{ - emailBuilder, - }, - }, - }, - }, - { - name: "SingleManifestMultipleReasons", - input: kopia.NewMockBackupBases().WithMergeBases( - kopia.ManifestEntry{ - Manifest: manifest1, - Reasons: []kopia.Reason{ - emailReason, - contactsReason, - }, - }).ClearMockAssistBases(), - expected: []kopia.IncrementalBase{ - { - Manifest: manifest1, - SubtreePaths: []*path.Builder{ - emailBuilder, - contactsBuilder, - }, - }, - }, - }, - { - name: "MultipleManifestsMultipleReasons", - input: kopia.NewMockBackupBases().WithMergeBases( - kopia.ManifestEntry{ - Manifest: manifest1, - Reasons: []kopia.Reason{ - emailReason, - contactsReason, - }, - }, - kopia.ManifestEntry{ - Manifest: manifest2, - Reasons: []kopia.Reason{ - emailReason, - contactsReason, - }, - }).ClearMockAssistBases(), - expected: []kopia.IncrementalBase{ - { - Manifest: manifest1, - SubtreePaths: []*path.Builder{ - emailBuilder, - contactsBuilder, - }, - }, - { - Manifest: manifest2, - SubtreePaths: []*path.Builder{ - emailBuilder, - contactsBuilder, - }, - }, - }, - }, - { - name: "Single Manifest Single Reason With Assist Base", - input: kopia.NewMockBackupBases().WithMergeBases( - kopia.ManifestEntry{ - Manifest: manifest1, - Reasons: []kopia.Reason{ - emailReason, - }, - }).WithAssistBases( - kopia.ManifestEntry{ - Manifest: manifest2, - Reasons: []kopia.Reason{ - contactsReason, - }, - }), - expected: []kopia.IncrementalBase{ - { - Manifest: manifest1, - SubtreePaths: []*path.Builder{ - emailBuilder, - }, - }, - { - Manifest: manifest2, - }, - }, + mbu := &mockBackupConsumer{ + checkFunc: func( + backupReasons []kopia.Reasoner, + gotBases kopia.BackupBases, + cs []data.BackupCollection, + gotTags map[string]string, + buildTreeWithBase bool, + ) { + kopia.AssertBackupBasesEqual(t, bases, gotBases) + assert.Equal(t, expectedTags, gotTags) + assert.ElementsMatch(t, reasons, backupReasons) }, } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() - ctx, flush := tester.NewContext(t) - defer flush() - - mbu := &mockBackupConsumer{ - checkFunc: func( - bases []kopia.IncrementalBase, - cs []data.BackupCollection, - tags map[string]string, - buildTreeWithBase bool, - ) { - assert.ElementsMatch(t, test.expected, bases) - }, - } - - //nolint:errcheck - consumeBackupCollections( - ctx, - mbu, - tenant, - nil, - test.input, - nil, - nil, - model.StableID(""), - true, - fault.New(true)) - }) - } + //nolint:errcheck + consumeBackupCollections( + ctx, + mbu, + tenant, + reasons, + bases, + nil, + nil, + backupID, + true, + fault.New(true)) } func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems() { @@ -629,16 +535,16 @@ func 
(suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems DetailsID: "did2", } - pathReason1 = kopia.Reason{ - ResourceOwner: itemPath1.ResourceOwner(), - Service: itemPath1.Service(), - Category: itemPath1.Category(), - } - pathReason3 = kopia.Reason{ - ResourceOwner: itemPath3.ResourceOwner(), - Service: itemPath3.Service(), - Category: itemPath3.Category(), - } + pathReason1 = kopia.NewReason( + "", + itemPath1.ResourceOwner(), + itemPath1.Service(), + itemPath1.Category()) + pathReason3 = kopia.NewReason( + "", + itemPath3.ResourceOwner(), + itemPath3.Service(), + itemPath3.Category()) ) itemParents1, err := path.GetDriveFolderPath(itemPath1) @@ -684,7 +590,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems }, DetailsID: "foo", }, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -703,7 +609,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -730,13 +636,13 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -763,7 +669,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -822,7 +728,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -849,7 +755,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -879,7 +785,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -909,7 +815,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -940,7 +846,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, @@ -971,13 +877,13 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems inputBackups: []kopia.BackupEntry{ { Backup: &backup1, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, }, { Backup: &backup2, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason3, }, }, @@ -1064,11 +970,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde locPath1 = path.Builder{}.Append(itemPath1.Folders()...) 
- pathReason1 = kopia.Reason{ - ResourceOwner: itemPath1.ResourceOwner(), - Service: itemPath1.Service(), - Category: itemPath1.Category(), - } + pathReason1 = kopia.NewReason( + "", + itemPath1.ResourceOwner(), + itemPath1.Service(), + itemPath1.Category()) backup1 = kopia.BackupEntry{ Backup: &backup.Backup{ @@ -1077,7 +983,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde }, DetailsID: "did1", }, - Reasons: []kopia.Reason{ + Reasons: []kopia.Reasoner{ pathReason1, }, } @@ -1231,7 +1137,7 @@ func (suite *BackupOpIntegrationSuite) SetupSuite() { creds, err := a.M365Config() require.NoError(t, err, clues.ToCore(err)) - suite.ac, err = api.NewClient(creds) + suite.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) } diff --git a/src/internal/operations/manifests.go b/src/internal/operations/manifests.go index 5e1c79e4f..1c5d1716c 100644 --- a/src/internal/operations/manifests.go +++ b/src/internal/operations/manifests.go @@ -23,7 +23,7 @@ func produceManifestsAndMetadata( ctx context.Context, bf inject.BaseFinder, rp inject.RestoreProducer, - reasons, fallbackReasons []kopia.Reason, + reasons, fallbackReasons []kopia.Reasoner, tenantID string, getMetadata bool, ) (kopia.BackupBases, []data.RestoreCollection, bool, error) { @@ -47,8 +47,8 @@ func produceManifestsAndMetadata( bb = bb.MergeBackupBases( ctx, fbb, - func(r kopia.Reason) string { - return r.Service.String() + r.Category.String() + func(r kopia.Reasoner) string { + return r.Service().String() + r.Category().String() }) if !getMetadata { @@ -115,9 +115,9 @@ func collectMetadata( Append(fn). ToServiceCategoryMetadataPath( tenantID, - reason.ResourceOwner, - reason.Service, - reason.Category, + reason.ProtectedResource(), + reason.Service(), + reason.Category(), true) if err != nil { return nil, clues. 
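One note for readers of this diff: the `kopia.Reasoner` interface that replaces the old exported `kopia.Reason` struct throughout this PR is defined outside the hunks shown here. Judging only from the call sites (`kopia.NewReason(tenant, owner, service, cat)` plus the `ProtectedResource()`, `Service()`, and `Category()` accessors), its shape is presumably close to the sketch below; treat the names and details as inferred, not as the actual implementation:

```go
package kopia

import "github.com/alcionai/corso/src/pkg/path"

// Reasoner identifies why a backup or snapshot exists: the tenant,
// protected resource (user or site), service, and category it covers.
type Reasoner interface {
	Tenant() string
	ProtectedResource() string
	Service() path.ServiceType
	Category() path.CategoryType
}

// reason is a minimal value type satisfying Reasoner.
type reason struct {
	tenant, resource string
	service          path.ServiceType
	category         path.CategoryType
}

func (r reason) Tenant() string              { return r.tenant }
func (r reason) ProtectedResource() string   { return r.resource }
func (r reason) Service() path.ServiceType   { return r.service }
func (r reason) Category() path.CategoryType { return r.category }

// NewReason mirrors the constructor used at the call sites above.
func NewReason(
	tenant, resource string,
	service path.ServiceType,
	category path.CategoryType,
) Reasoner {
	return reason{tenant, resource, service, category}
}
```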
diff --git a/src/internal/operations/manifests_test.go b/src/internal/operations/manifests_test.go index e4ae9b6d3..5fdf22424 100644 --- a/src/internal/operations/manifests_test.go +++ b/src/internal/operations/manifests_test.go @@ -47,7 +47,7 @@ type mockBackupFinder struct { func (bf *mockBackupFinder) FindBases( _ context.Context, - reasons []kopia.Reason, + reasons []kopia.Reasoner, _ map[string]string, ) kopia.BackupBases { if len(reasons) == 0 { @@ -58,7 +58,7 @@ func (bf *mockBackupFinder) FindBases( return kopia.NewMockBackupBases() } - b := bf.data[reasons[0].ResourceOwner] + b := bf.data[reasons[0].ProtectedResource()] if b == nil { return kopia.NewMockBackupBases() } @@ -102,7 +102,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { table := []struct { name string manID string - reasons []kopia.Reason + reasons []kopia.Reasoner fileNames []string expectPaths func(*testing.T, []string) []path.Path expectErr error @@ -110,12 +110,8 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { { name: "single reason, single file", manID: "single single", - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory), }, expectPaths: func(t *testing.T, files []string) []path.Path { ps := make([]path.Path, 0, len(files)) @@ -133,12 +129,8 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { { name: "single reason, multiple files", manID: "single multi", - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory), }, expectPaths: func(t *testing.T, files []string) []path.Path { ps := make([]path.Path, 0, len(files)) @@ -156,17 +148,9 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { { name: "multiple reasons, single file", manID: "multi single", - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory), + kopia.NewReason(tid, ro, path.ExchangeService, path.ContactsCategory), }, expectPaths: func(t *testing.T, files []string) []path.Path { ps := make([]path.Path, 0, len(files)) @@ -187,17 +171,9 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { { name: "multiple reasons, multiple file", manID: "multi multi", - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory), + kopia.NewReason(tid, ro, path.ExchangeService, path.ContactsCategory), }, expectPaths: func(t *testing.T, files []string) []path.Path { ps := make([]path.Path, 0, len(files)) @@ -243,17 +219,13 @@ func buildReasons( ro string, service path.ServiceType, cats ...path.CategoryType, -) []kopia.Reason { - var reasons []kopia.Reason +) []kopia.Reasoner { + var reasons []kopia.Reasoner for _, cat := range cats { reasons = append( reasons, - kopia.Reason{ - ResourceOwner: ro, - Service: service, - Category: cat, - }) + 
kopia.NewReason("", ro, service, cat)) } return reasons @@ -280,7 +252,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { name string bf *mockBackupFinder rp mockRestoreProducer - reasons []kopia.Reason + reasons []kopia.Reasoner getMeta bool assertErr assert.ErrorAssertionFunc assertB assert.BoolAssertionFunc @@ -291,7 +263,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { { name: "don't get metadata, no mans", rp: mockRestoreProducer{}, - reasons: []kopia.Reason{}, + reasons: []kopia.Reasoner{}, getMeta: false, assertErr: assert.NoError, assertB: assert.False, @@ -308,12 +280,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { }, }, rp: mockRestoreProducer{}, - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory), }, getMeta: false, assertErr: assert.NoError, @@ -333,12 +301,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { }, }, rp: mockRestoreProducer{}, - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory), }, getMeta: true, assertErr: assert.NoError, @@ -365,17 +329,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { "id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id1"}}}, }, }, - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory), + kopia.NewReason("", ro, path.ExchangeService, path.ContactsCategory), }, getMeta: true, assertErr: assert.NoError, @@ -421,12 +377,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { "id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id2"}}}, }, }, - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory), }, getMeta: true, assertErr: assert.NoError, @@ -454,12 +406,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { "id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id2"}}}, }, }, - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory), }, getMeta: true, assertErr: assert.NoError, @@ -480,12 +428,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { }, }, rp: mockRestoreProducer{err: assert.AnError}, - reasons: []kopia.Reason{ - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, + reasons: []kopia.Reasoner{ + kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory), }, getMeta: true, assertErr: assert.Error, @@ -588,24 +532,24 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb } } - emailReason := kopia.Reason{ - ResourceOwner: ro, - Service: path.ExchangeService, - Category: 
path.EmailCategory, - } + emailReason := kopia.NewReason( + "", + ro, + path.ExchangeService, + path.EmailCategory) - fbEmailReason := kopia.Reason{ - ResourceOwner: fbro, - Service: path.ExchangeService, - Category: path.EmailCategory, - } + fbEmailReason := kopia.NewReason( + "", + fbro, + path.ExchangeService, + path.EmailCategory) table := []struct { name string bf *mockBackupFinder rp mockRestoreProducer - reasons []kopia.Reason - fallbackReasons []kopia.Reason + reasons []kopia.Reasoner + fallbackReasons []kopia.Reasoner getMeta bool assertErr assert.ErrorAssertionFunc assertB assert.BoolAssertionFunc @@ -624,7 +568,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb }, }, rp: mockRestoreProducer{}, - fallbackReasons: []kopia.Reason{fbEmailReason}, + fallbackReasons: []kopia.Reasoner{fbEmailReason}, getMeta: false, assertErr: assert.NoError, assertB: assert.False, @@ -649,7 +593,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}}, }, }, - fallbackReasons: []kopia.Reason{fbEmailReason}, + fallbackReasons: []kopia.Reasoner{fbEmailReason}, getMeta: true, assertErr: assert.NoError, assertB: assert.True, @@ -680,8 +624,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}}, }, }, - reasons: []kopia.Reason{emailReason}, - fallbackReasons: []kopia.Reason{fbEmailReason}, + reasons: []kopia.Reasoner{emailReason}, + fallbackReasons: []kopia.Reasoner{fbEmailReason}, getMeta: true, assertErr: assert.NoError, assertB: assert.True, @@ -708,8 +652,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id2"}}}, }, }, - reasons: []kopia.Reason{emailReason}, - fallbackReasons: []kopia.Reason{fbEmailReason}, + reasons: []kopia.Reasoner{emailReason}, + fallbackReasons: []kopia.Reasoner{fbEmailReason}, getMeta: true, assertErr: assert.NoError, assertB: assert.True, @@ -744,8 +688,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id2"}}}, }, }, - reasons: []kopia.Reason{emailReason}, - fallbackReasons: []kopia.Reason{fbEmailReason}, + reasons: []kopia.Reasoner{emailReason}, + fallbackReasons: []kopia.Reasoner{fbEmailReason}, getMeta: true, assertErr: assert.NoError, assertB: assert.True, @@ -776,8 +720,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}}, }, }, - reasons: []kopia.Reason{emailReason}, - fallbackReasons: []kopia.Reason{fbEmailReason}, + reasons: []kopia.Reasoner{emailReason}, + fallbackReasons: []kopia.Reasoner{fbEmailReason}, getMeta: true, assertErr: assert.NoError, assertB: assert.True, @@ -808,8 +752,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id2"}}}, }, }, - reasons: []kopia.Reason{emailReason}, - fallbackReasons: []kopia.Reason{fbEmailReason}, + reasons: []kopia.Reasoner{emailReason}, + fallbackReasons: []kopia.Reasoner{fbEmailReason}, getMeta: true, assertErr: assert.NoError, assertB: assert.True, @@ -838,21 +782,13 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id1": 
{data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}}, }, }, - reasons: []kopia.Reason{ + reasons: []kopia.Reasoner{ emailReason, - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + kopia.NewReason("", ro, path.ExchangeService, path.ContactsCategory), }, - fallbackReasons: []kopia.Reason{ + fallbackReasons: []kopia.Reasoner{ fbEmailReason, - { - ResourceOwner: fbro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + kopia.NewReason("", fbro, path.ExchangeService, path.ContactsCategory), }, getMeta: true, assertErr: assert.NoError, @@ -882,13 +818,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}}, }, }, - reasons: []kopia.Reason{emailReason}, - fallbackReasons: []kopia.Reason{ - { - ResourceOwner: fbro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + reasons: []kopia.Reasoner{emailReason}, + fallbackReasons: []kopia.Reasoner{ + kopia.NewReason("", fbro, path.ExchangeService, path.ContactsCategory), }, getMeta: true, assertErr: assert.NoError, @@ -921,21 +853,13 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb "fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}}, }, }, - reasons: []kopia.Reason{ + reasons: []kopia.Reasoner{ emailReason, - { - ResourceOwner: ro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + kopia.NewReason("", ro, path.ExchangeService, path.ContactsCategory), }, - fallbackReasons: []kopia.Reason{ + fallbackReasons: []kopia.Reasoner{ fbEmailReason, - { - ResourceOwner: fbro, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, + kopia.NewReason("", fbro, path.ExchangeService, path.ContactsCategory), }, getMeta: true, assertErr: assert.NoError, diff --git a/src/internal/operations/test/exchange_test.go b/src/internal/operations/test/exchange_test.go index 647c7a397..e33cdd0ae 100644 --- a/src/internal/operations/test/exchange_test.go +++ b/src/internal/operations/test/exchange_test.go @@ -278,7 +278,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr creds, err := acct.M365Config() require.NoError(t, err, clues.ToCore(err)) - ac, err := api.NewClient(creds) + ac, err := api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) // generate 3 new folders with two items each. 
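A second pattern repeating through these test files: `api.NewClient` now requires a `control.Options` argument, which is how the new `DeltaPageSize` setting (added to `src/pkg/control/options.go` below) reaches the delta pagers. A minimal sketch of the new call shape; `newAPIClient` is a hypothetical helper, not part of this PR:

```go
package example

import (
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// newAPIClient builds an api.Client with a tuned delta page size.
func newAPIClient(creds account.M365Config) (api.Client, error) {
	// control.Defaults() now sets DeltaPageSize to 500; per this PR,
	// NewClient resets values < 1 or > maxDeltaPageSize to maxDeltaPageSize.
	opts := control.Defaults()
	opts.DeltaPageSize = 200

	return api.NewClient(creds, opts)
}
```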
diff --git a/src/internal/operations/test/helper_test.go b/src/internal/operations/test/helper_test.go index 93a609365..f1da62cbe 100644 --- a/src/internal/operations/test/helper_test.go +++ b/src/internal/operations/test/helper_test.go @@ -242,13 +242,7 @@ func checkBackupIsInManifests( for _, category := range categories { t.Run(category.String(), func(t *testing.T) { var ( - reasons = []kopia.Reason{ - { - ResourceOwner: resourceOwner, - Service: sel.PathService(), - Category: category, - }, - } + r = kopia.NewReason("", resourceOwner, sel.PathService(), category) tags = map[string]string{kopia.TagBackupCategory: ""} found bool ) @@ -256,7 +250,7 @@ func checkBackupIsInManifests( bf, err := kw.NewBaseFinder(sw) require.NoError(t, err, clues.ToCore(err)) - mans := bf.FindBases(ctx, reasons, tags) + mans := bf.FindBases(ctx, []kopia.Reasoner{r}, tags) for _, man := range mans.MergeBases() { bID, ok := man.GetTag(kopia.TagBackupID) if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) { @@ -591,7 +585,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup { creds, err := a.M365Config() require.NoError(t, err, clues.ToCore(err)) - its.ac, err = api.NewClient(creds) + its.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) its.gockAC, err = mock.NewClient(creds) diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index 9deb0176d..6f5918c81 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -234,6 +234,7 @@ func write( backupStats, _, _, err := bup.ConsumeBackupCollections( ctx, nil, + nil, dbcs, prefixmatcher.NopReader[map[string]struct{}](), nil, diff --git a/src/internal/tester/tconfig/config.go b/src/internal/tester/tconfig/config.go index 8660e6135..eda63eead 100644 --- a/src/internal/tester/tconfig/config.go +++ b/src/internal/tester/tconfig/config.go @@ -112,7 +112,7 @@ func ReadTestConfig() (map[string]string, error) { testEnv := map[string]string{} fallbackTo(testEnv, TestCfgStorageProvider, vpr.GetString(TestCfgStorageProvider)) fallbackTo(testEnv, TestCfgAccountProvider, vpr.GetString(TestCfgAccountProvider)) - fallbackTo(testEnv, TestCfgBucket, os.Getenv("S3_BUCKET"), vpr.GetString(TestCfgBucket), "test-corso-repo-init") + fallbackTo(testEnv, TestCfgBucket, os.Getenv("S3_BUCKET"), vpr.GetString(TestCfgBucket)) fallbackTo(testEnv, TestCfgEndpoint, vpr.GetString(TestCfgEndpoint), "s3.amazonaws.com") fallbackTo(testEnv, TestCfgPrefix, vpr.GetString(TestCfgPrefix)) fallbackTo(testEnv, TestCfgAzureTenantID, os.Getenv(account.AzureTenantID), vpr.GetString(TestCfgAzureTenantID)) diff --git a/src/pkg/control/options.go b/src/pkg/control/options.go index 23375f229..fbb3d08a9 100644 --- a/src/pkg/control/options.go +++ b/src/pkg/control/options.go @@ -7,14 +7,17 @@ import ( // Options holds the optional configurations for a process type Options struct { + // DeltaPageSize controls the quantity of items fetched in each page + // during multi-page queries, such as graph api delta endpoints. 
+ DeltaPageSize int32 `json:"deltaPageSize"` DisableMetrics bool `json:"disableMetrics"` FailureHandling FailurePolicy `json:"failureHandling"` + ItemExtensionFactory []extensions.CreateItemExtensioner `json:"-"` + Parallelism Parallelism `json:"parallelism"` + Repo repository.Options `json:"repo"` RestorePermissions bool `json:"restorePermissions"` SkipReduce bool `json:"skipReduce"` ToggleFeatures Toggles `json:"toggleFeatures"` - Parallelism Parallelism `json:"parallelism"` - Repo repository.Options `json:"repo"` - ItemExtensionFactory []extensions.CreateItemExtensioner `json:"-"` } type Parallelism struct { @@ -39,6 +42,7 @@ const ( func Defaults() Options { return Options{ FailureHandling: FailAfterRecovery, + DeltaPageSize: 500, ToggleFeatures: Toggles{}, Parallelism: Parallelism{ CollectionBuffer: 4, diff --git a/src/pkg/control/restore.go b/src/pkg/control/restore.go index 2b4129d9f..79d49ae20 100644 --- a/src/pkg/control/restore.go +++ b/src/pkg/control/restore.go @@ -2,13 +2,17 @@ package control import ( "context" + "encoding/json" + "fmt" "strings" + "github.com/alcionai/clues" "golang.org/x/exp/maps" "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" ) const ( @@ -39,24 +43,24 @@ const RootLocation = "/" type RestoreConfig struct { // Defines the per-item collision handling policy. // Defaults to Skip. - OnCollision CollisionPolicy + OnCollision CollisionPolicy `json:"onCollision"` // ProtectedResource specifies which resource the data will be restored to. // If empty, restores to the same resource that was backed up. // Defaults to empty. - ProtectedResource string + ProtectedResource string `json:"protectedResource"` // Location specifies the container into which the data will be restored. // Only accepts container names, does not accept IDs. // If empty or "/", data will get restored in place, beginning at the root. // Defaults to "Corso_Restore_" - Location string + Location string `json:"location"` // Drive specifies the name of the drive into which the data will be // restored. If empty, data is restored to the same drive that was backed // up. // Defaults to empty. - Drive string + Drive string `json:"drive"` } func DefaultRestoreConfig(timeFormat dttm.TimeFormat) RestoreConfig { @@ -90,3 +94,58 @@ func EnsureRestoreConfigDefaults( return rc } + +// --------------------------------------------------------------------------- +// pii control +// --------------------------------------------------------------------------- + +var ( + // interface compliance required for handling PII + _ clues.Concealer = &RestoreConfig{} + _ fmt.Stringer = &RestoreConfig{} + + // interface compliance for the observe package to display + // values without concealing PII. + _ clues.PlainStringer = &RestoreConfig{} +) + +func (rc RestoreConfig) marshal() string { + bs, err := json.Marshal(rc) + if err != nil { + return "err marshalling" + } + + return string(bs) +} + +func (rc RestoreConfig) concealed() RestoreConfig { + return RestoreConfig{ + OnCollision: rc.OnCollision, + ProtectedResource: clues.Hide(rc.ProtectedResource).Conceal(), + Location: path.LoggableDir(rc.Location), + Drive: clues.Hide(rc.Drive).Conceal(), + } +} + +// Conceal produces a concealed representation of the config, suitable for +// logging, storing in errors, and other output. 
+func (rc RestoreConfig) Conceal() string { + return rc.concealed().marshal() +} + +// Format produces a concealed representation of the config, even when +// used within a PrintF, suitable for logging, storing in errors, +// and other output. +func (rc RestoreConfig) Format(fs fmt.State, _ rune) { + fmt.Fprint(fs, rc.concealed()) +} + +// String returns a plain text version of the restoreConfig. +func (rc RestoreConfig) String() string { + return rc.PlainString() +} + +// PlainString returns an unescaped, unmodified string of the restore configuration. +func (rc RestoreConfig) PlainString() string { + return rc.marshal() +} diff --git a/src/pkg/services/m365/api/client.go b/src/pkg/services/m365/api/client.go index 957da03db..c74bf215b 100644 --- a/src/pkg/services/m365/api/client.go +++ b/src/pkg/services/m365/api/client.go @@ -8,6 +8,7 @@ import ( "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" ) @@ -36,11 +37,13 @@ type Client struct { // arbitrary urls instead of constructing queries using the // graph api client. Requester graph.Requester + + options control.Options } // NewClient produces a new exchange api client. Must be used in // place of creating an ad-hoc client struct. -func NewClient(creds account.M365Config) (Client, error) { +func NewClient(creds account.M365Config, co control.Options) (Client, error) { s, err := NewService(creds) if err != nil { return Client{}, err @@ -53,7 +56,11 @@ func NewClient(creds account.M365Config) (Client, error) { rqr := graph.NewNoTimeoutHTTPWrapper() - return Client{creds, s, li, rqr}, nil + if co.DeltaPageSize < 1 || co.DeltaPageSize > maxDeltaPageSize { + co.DeltaPageSize = maxDeltaPageSize + } + + return Client{creds, s, li, rqr, co}, nil } // initConcurrencyLimit ensures that the graph concurrency limiter is diff --git a/src/pkg/services/m365/api/contacts_pager.go b/src/pkg/services/m365/api/contacts_pager.go index f997bd2e7..9a86f1e00 100644 --- a/src/pkg/services/m365/api/contacts_pager.go +++ b/src/pkg/services/m365/api/contacts_pager.go @@ -277,7 +277,7 @@ func (c Contacts) NewContactDeltaIDsPager( Select: idAnd(parentFolderID), // do NOT set Top. It limits the total items received. }, - Headers: newPreferHeaders(preferPageSize(maxDeltaPageSize), preferImmutableIDs(immutableIDs)), + Headers: newPreferHeaders(preferPageSize(c.options.DeltaPageSize), preferImmutableIDs(immutableIDs)), } var builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder diff --git a/src/pkg/services/m365/api/events_pager.go b/src/pkg/services/m365/api/events_pager.go index d70e1d281..2874d37e5 100644 --- a/src/pkg/services/m365/api/events_pager.go +++ b/src/pkg/services/m365/api/events_pager.go @@ -244,7 +244,7 @@ func (c Events) NewEventDeltaIDsPager( immutableIDs bool, ) (itemIDPager, error) { options := &users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration{ - Headers: newPreferHeaders(preferPageSize(maxDeltaPageSize), preferImmutableIDs(immutableIDs)), + Headers: newPreferHeaders(preferPageSize(c.options.DeltaPageSize), preferImmutableIDs(immutableIDs)), QueryParameters: &users.ItemCalendarsItemEventsDeltaRequestBuilderGetQueryParameters{ // do NOT set Top. It limits the total items received. 
}, diff --git a/src/pkg/services/m365/api/helper_test.go b/src/pkg/services/m365/api/helper_test.go index 984411f4a..1d6f27593 100644 --- a/src/pkg/services/m365/api/helper_test.go +++ b/src/pkg/services/m365/api/helper_test.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) @@ -97,7 +98,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup { creds, err := a.M365Config() require.NoError(t, err, clues.ToCore(err)) - its.ac, err = api.NewClient(creds) + its.ac, err = api.NewClient(creds, control.Defaults()) require.NoError(t, err, clues.ToCore(err)) its.gockAC, err = mock.NewClient(creds) diff --git a/src/pkg/services/m365/api/mail_pager.go b/src/pkg/services/m365/api/mail_pager.go index 5472239f8..0648a906c 100644 --- a/src/pkg/services/m365/api/mail_pager.go +++ b/src/pkg/services/m365/api/mail_pager.go @@ -310,7 +310,7 @@ func (c Mail) NewMailDeltaIDsPager( Select: idAnd("isRead"), // do NOT set Top. It limits the total items received. }, - Headers: newPreferHeaders(preferPageSize(maxDeltaPageSize), preferImmutableIDs(immutableIDs)), + Headers: newPreferHeaders(preferPageSize(c.options.DeltaPageSize), preferImmutableIDs(immutableIDs)), } var builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder diff --git a/src/pkg/services/m365/api/sites.go b/src/pkg/services/m365/api/sites.go index e573cfc07..4e13ebcfb 100644 --- a/src/pkg/services/m365/api/sites.go +++ b/src/pkg/services/m365/api/sites.go @@ -225,13 +225,13 @@ func ValidateSite(item models.Siteable) error { wURL := ptr.Val(item.GetWebUrl()) if len(wURL) == 0 { - return clues.New("missing webURL").With("site_id", id) // TODO: pii + return clues.New("missing webURL").With("site_id", clues.Hide(id)) } // personal (ie: oneDrive) sites have to be filtered out server-side. if strings.Contains(wURL, PersonalSitePath) { return clues.Stack(ErrKnownSkippableCase). - With("site_id", id, "site_web_url", wURL) // TODO: pii + With("site_id", clues.Hide(id), "site_web_url", clues.Hide(wURL)) } name := ptr.Val(item.GetDisplayName()) @@ -239,10 +239,10 @@ func ValidateSite(item models.Siteable) error { // the built-in site at "https://{tenant-domain}/search" never has a name. if strings.HasSuffix(wURL, "/search") { return clues.Stack(ErrKnownSkippableCase). 
- With("site_id", id, "site_web_url", wURL) // TODO: pii + With("site_id", clues.Hide(id), "site_web_url", clues.Hide(wURL)) } - return clues.New("missing site display name").With("site_id", id) + return clues.New("missing site display name").With("site_id", clues.Hide(id)) } return nil diff --git a/src/pkg/services/m365/m365.go b/src/pkg/services/m365/m365.go index 91141696f..9dd803cf5 100644 --- a/src/pkg/services/m365/m365.go +++ b/src/pkg/services/m365/m365.go @@ -10,6 +10,7 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" @@ -328,7 +329,7 @@ func makeAC( return api.Client{}, clues.Wrap(err, "getting m365 account creds") } - cli, err := api.NewClient(creds) + cli, err := api.NewClient(creds, control.Defaults()) if err != nil { return api.Client{}, clues.Wrap(err, "constructing api client") } diff --git a/website/blog/2023-07-24-multi-tenant-backup-with-corso.md b/website/blog/2023-07-24-multi-tenant-backup-with-corso.md new file mode 100644 index 000000000..e828bad2c --- /dev/null +++ b/website/blog/2023-07-24-multi-tenant-backup-with-corso.md @@ -0,0 +1,175 @@ +--- +slug: multi-tenant-backup-with-corso +title: "Using Corso to Build a Self-Hosted Multi-Tenant Office 365 Backup Solution" +description: "" +authors: + - name: meuchels + title: Corso Community Member, IT Lead + url: https://github.com/meuchels + image_url: https://avatars.githubusercontent.com/u/77171293?v=4 +tags: [corso, microsoft 365, backups, msp, multi-tenant] +date: 2023-07-24 +image: ./images/data-center.jpg +--- + +![A woman engineer holding a laptop in front of a data center](./images/data-center.jpg) + +This community-contributed blog post shows how MSPs in the community are using Corso to build out a multi-tenant backup +solution for their Microsoft 365 customers. If you have questions, come find the author (or us) on +[Discord](https://www.alcion.ai/discord). + + + +First of all, I offer a fully managed backup solution. My clients have no access to the backup software or the data. I +require them to request recovery in a ticket. For my use case I have a self-hosted instance of MinIO that I won't be +going over but there is [another blog post on it](./2023-2-4-where-to-store-corso.md#local-s3-testing). I will show the +layout and an example of how to backup emails using the exchange option in Corso. + +## Organizing the file structure on your storage + +I wanted my S3 bucket to be laid out in the following fashion utilizing 1 bucket with prefixes for the tenants. For now, +all I did is create a bucket with access to a user for corso. While it's possible to use a single bucket and use prefix +paths per tenant within it, I didn't do that in my setup. The will be generated later with the backup initialization. + +```bash +BUCKET + tenant1-exchange + tenant1-onedrive + tenant1-sharepoint + tenant2-exchange + tenant2-onedrive + tenant2-sharepoint +``` + +If I don’t backup a particular service for a client, it will be clear by looking at whether the bucket exists or not. + +I have a short name for each tenant to differentiate them. + +## The backup compute server layout + +I utilize Ubuntu Server for this task. In my setup, everything is done as the root user. 
+## The backup compute server layout
+
+I utilize Ubuntu Server for this task. In my setup, everything is done as the root user. I have put the corso
+executable in `/opt/corso/` and will be building everything under there. Here is the folder layout before I go into
+usage.
+
+```bash
+# For logs
+/opt/corso/logs
+# For config files
+/opt/corso/toml
+# Root of the scripts folder
+/opt/corso/scripts
+# For building out the environment loaders
+/opt/corso/scripts/environments
+# For building out the backup scripts
+/opt/corso/scripts/back-available
+# For adding a link to the backups that will be run
+/opt/corso/scripts/back-active
+```
+
+## The environment files
+
+For [configuration](../../docs/setup/configuration/), create an environment file
+`/opt/corso/scripts/environments/blank-exchange` with the following content as a template. You can copy this template
+to `<tenantshortname>-exchange` in the same folder to set up your client's exchange backup environment.
+
+```bash
+#####################################
+#EDIT THIS SECTION TO MEET YOUR NEEDS
+#####################################
+
+# this is a shortname for your tenant to setup storage
+export tenantshortname=""
+
+# this is your tenant info from the app setup on O365
+export AZURE_TENANT_ID=""
+export AZURE_CLIENT_ID=""
+export AZURE_CLIENT_SECRET=""
+
+# these are your credentials for your s3 storage
+export AWS_ACCESS_KEY_ID=""
+export AWS_SECRET_ACCESS_KEY=""
+```
diff --git a/website/package-lock.json b/website/package-lock.json
--- a/website/package-lock.json
+++ b/website/package-lock.json
@@ -22569,9 +22569,9 @@
       "integrity": "sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ=="
     },
     "postcss": {
-      "version": "8.4.26",
-      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz",
-      "integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==",
+      "version": "8.4.27",
+      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz",
+      "integrity": "sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==",
       "requires": {
         "nanoid": "^3.3.6",
         "picocolors": "^1.0.0",
@@ -23802,9 +23802,9 @@
       "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
     },
     "sass": {
-      "version": "1.64.0",
-      "resolved": "https://registry.npmjs.org/sass/-/sass-1.64.0.tgz",
-      "integrity": "sha512-m7YtAGmQta9uANIUJwXesAJMSncqH+3INc8kdVXs6eV6GUC8Qu2IYKQSN8PRLgiQfpca697G94klm2leYMxSHw==",
+      "version": "1.64.1",
+      "resolved": "https://registry.npmjs.org/sass/-/sass-1.64.1.tgz",
+      "integrity": "sha512-16rRACSOFEE8VN7SCgBu1MpYCyN7urj9At898tyzdXFhC+a+yOX5dXwAR7L8/IdPJ1NB8OYoXmD55DM30B2kEQ==",
       "requires": {
         "chokidar": ">=3.0.0 <4.0.0",
         "immutable": "^4.0.0",
diff --git a/website/package.json b/website/package.json
index 3e1388b77..14becab38 100644
--- a/website/package.json
+++ b/website/package.json
@@ -30,7 +30,7 @@
     "prism-react-renderer": "^1.3.5",
     "react": "^17.0.2",
     "react-dom": "^17.0.2",
-    "sass": "^1.64.0",
+    "sass": "^1.64.1",
     "tiny-slider": "^2.9.4",
     "tw-elements": "^1.0.0-alpha13",
     "wow.js": "^1.2.2"
@@ -39,7 +39,7 @@
     "@docusaurus/module-type-aliases": "2.4.1",
     "@iconify/react": "^4.1.1",
     "autoprefixer": "^10.4.14",
-    "postcss": "^8.4.26",
+    "postcss": "^8.4.27",
     "tailwindcss": "^3.3.3"
   },
   "browserslist": {