commit 7302a3f059
Merge branch 'main' into teamsDiscovery
@@ -24,6 +24,10 @@ inputs:
   log-dir:
     description: Folder to store test log files
     required: true
+  on-collision:
+    description: Value for the --collisions flag
+    required: false
+    default: "replace"

 outputs:
   backup-id:
@@ -57,6 +61,7 @@ runs:
         ./corso restore '${{ inputs.service }}' \
           --no-stats \
           --hide-progress \
+          --collisions ${{ inputs.on-collision }} \
           ${{ inputs.restore-args }} \
           --backup '${{ steps.backup.outputs.result }}' \
           2>&1 |

.github/actions/purge-m365-data/action.yml (vendored, 2 changed lines)
@@ -89,4 +89,4 @@ runs:
       env:
         M365_TENANT_ADMIN_USER: ${{ inputs.m365-admin-user }}
         M365_TENANT_ADMIN_PASSWORD: ${{ inputs.m365-admin-password }}
-      run: ./onedrivePurge.ps1 -Site ${{ inputs.site }} -LibraryNameList "${{ inputs.libraries }}".split(",") -FolderPrefixPurgeList ${{ inputs.folder-prefix }} -LibraryPrefixDeleteList ${{ inputs.library-prefix }} -PurgeBeforeTimestamp ${{ inputs.older-than }}
+      run: ./onedrivePurge.ps1 -Site ${{ inputs.site }} -LibraryNameList "${{ inputs.libraries }}".split(",") -FolderPrefixPurgeList ${{ inputs.folder-prefix }} -LibraryPrefixDeleteList ${{ inputs.library-prefix && inputs.library-prefix || '[]' }} -PurgeBeforeTimestamp ${{ inputs.older-than }}

.github/actions/slack-message/action.yml (vendored, new file, 57 lines)
@@ -0,0 +1,57 @@
+name: Send a message to slack
+
+inputs:
+  msg:
+    description: The slack message text
+  slack_url:
+    description: passthrough for secrets.SLACK_WEBHOOK_URL
+
+runs:
+  using: composite
+  steps:
+    - uses: actions/checkout@v3
+
+    - name: set github ref
+      shell: bash
+      run: |
+        echo "github_reference=${{ github.ref }}" >> $GITHUB_ENV
+
+    - name: trim github ref
+      shell: bash
+      run: |
+        echo "trimmed_ref=${github_reference#refs/}" >> $GITHUB_ENV
+
+    - name: build urls
+      shell: bash
+      run: |
+        echo "logurl=$(printf '<https://github.com/alcionai/corso/actions/runs/%s|[Action]>' ${{ github.run_id }})" >> $GITHUB_ENV
+        echo "commiturl=$(printf '<https://github.com/alcionai/corso/commit/%s|[Commit]>' ${{ github.sha }})" >> $GITHUB_ENV
+        echo "refurl=$(printf '<https://github.com/alcionai/corso/%s|[Ref]>' ${{ env.trimmed_ref }})" >> $GITHUB_ENV
+
+    - name: use url or blank val
+      shell: bash
+      run: |
+        echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV
+        echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV
+        echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV
+        echo "REF=${{ env.trimmed_ref && env.refurl || '-' }}" >> $GITHUB_ENV
+
+    - id: slack-message
+      uses: slackapi/slack-github-action@v1.24.0
+      env:
+        SLACK_WEBHOOK_URL: ${{ inputs.slack_url }}
+        SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+      with:
+        payload: |
+          {
+            "text": "${{ inputs.msg }} :: ${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}",
+            "blocks": [
+              {
+                "type": "section",
+                "text": {
+                  "type": "mrkdwn",
+                  "text": "${{ inputs.msg }} :: ${{ env.JOB }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
+                }
+              }
+            ]
+          }

.github/workflows/ci_test_cleanup.yml (vendored, 22 changed lines)
@@ -18,9 +18,7 @@ jobs:
       - uses: actions/checkout@v3

        # sets the maximum time to now-30m.
-       # CI tests have a 10 minute timeout.
-       # At 20 minutes ago, we should be safe from conflicts.
-       # The additional 10 minutes is just to be good citizens.
+       # CI tests have a 20 minute timeout.
      - name: Set purge boundary
        run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV

@@ -36,6 +34,13 @@ jobs:
           m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }}
           m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }}

+      - name: Notify failure in slack
+        if: failure()
+        uses: ./.github/actions/slack-message
+        with:
+          msg: "[FAILED] ${{ vars[matrix.user] }} CI Cleanup"
+          slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}
+
   Test-Site-Data-Cleanup:
     environment: Testing
     runs-on: ubuntu-latest
@@ -48,9 +53,7 @@ jobs:
       - uses: actions/checkout@v3

        # sets the maximum time to now-30m.
-       # CI tests have a 10 minute timeout.
-       # At 20 minutes ago, we should be safe from conflicts.
-       # The additional 10 minutes is just to be good citizens.
+       # CI tests have a 20 minute timeout.
      - name: Set purge boundary
        run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV

@@ -67,3 +70,10 @@ jobs:
           azure-tenant-id: ${{ secrets.TENANT_ID }}
           m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }}
           m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }}
+
+      - name: Notify failure in slack
+        if: failure()
+        uses: ./.github/actions/slack-message
+        with:
+          msg: "[FAILED] ${{ vars[matrix.site] }} CI Cleanup"
+          slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

.github/workflows/longevity_test.yml (vendored, 32 changed lines)
@@ -277,33 +277,9 @@ jobs:
           if-no-files-found: error
           retention-days: 14

-      - name: SHA info
-        id: sha-info
+      - name: Notify failure in slack
         if: failure()
-        run: |
-          echo ${GITHUB_REF#refs/heads/}-${GITHUB_SHA}
-          echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT
-          echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT
-          echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT
-
-      - name: Send Github Action failure to Slack
-        id: slack-notification
-        if: failure()
-        uses: slackapi/slack-github-action@v1.24.0
+        uses: ./.github/actions/slack-message
         with:
-          payload: |
-            {
-              "text": "Longevity test failure - build: ${{ job.status }} - SHA: ${{ steps.sha-info.outputs.SHA }}",
-              "blocks": [
-                {
-                  "type": "section",
-                  "text": {
-                    "type": "mrkdwn",
-                    "text": "[FAILED] Longevity Checks :: <${{ steps.sha-info.outputs.RUN_URL }}|[Logs]> <${{ steps.sha-info.outputs.COMMIT_URL }}|[Base]>\nCommit: <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>"
-                  }
-                }
-              ]
-            }
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          msg: "[FAILED] Longevity Test"
+          slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

.github/workflows/nightly_test.yml (vendored, 33 changed lines)
@@ -94,6 +94,7 @@ jobs:
           CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
           CORSO_LOG_FILE: ${{ github.workspace }}/testlog/run-nightly.log
           LOG_GRAPH_REQUESTS: true
+          S3_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }}
         run: |
           set -euo pipefail
           go test \
@@ -119,33 +120,9 @@ jobs:
           if-no-files-found: error
           retention-days: 14

-      - name: SHA info
-        id: sha-info
+      - name: Notify failure in slack
         if: failure()
-        run: |
-          echo ${GITHUB_REF#refs/heads/}-${GITHUB_SHA}
-          echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT
-          echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT
-          echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT
-
-      - name: Send Github Action failure to Slack
-        id: slack-notification
-        if: failure()
-        uses: slackapi/slack-github-action@v1.24.0
+        uses: ./.github/actions/slack-message
         with:
-          payload: |
-            {
-              "text": "Nightly test failure - build: ${{ job.status }} - SHA: ${{ steps.sha-info.outputs.SHA }}",
-              "blocks": [
-                {
-                  "type": "section",
-                  "text": {
-                    "type": "mrkdwn",
-                    "text": "[FAILED] Nightly Checks :: <${{ steps.sha-info.outputs.RUN_URL }}|[Logs]> <${{ steps.sha-info.outputs.COMMIT_URL }}|[Base]>\nCommit: <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>"
-                  }
-                }
-              ]
-            }
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          msg: "[FAILED] Nightly Checks"
+          slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

.github/workflows/sanity-test.yaml (vendored, 32 changed lines)
@@ -333,33 +333,9 @@ jobs:
           if-no-files-found: error
           retention-days: 14

-      - name: SHA info
-        id: sha-info
+      - name: Notify failure in slack
         if: failure()
-        run: |
-          echo ${GITHUB_REF#refs/heads/}-${GITHUB_SHA}
-          echo SHA=${GITHUB_REF#refs/heads/}-${GITHUB_SHA} >> $GITHUB_OUTPUT
-          echo RUN_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} >> $GITHUB_OUTPUT
-          echo COMMIT_URL=${{ github.server_url }}/${{ github.repository }}/commit/${GITHUB_SHA} >> $GITHUB_OUTPUT
-
-      - name: Send Github Action failure to Slack
-        id: slack-notification
-        if: failure()
-        uses: slackapi/slack-github-action@v1.24.0
+        uses: ./.github/actions/slack-message
         with:
-          payload: |
-            {
-              "text": "Sanity test failure - build: ${{ job.status }} - SHA: ${{ steps.sha-info.outputs.SHA }}",
-              "blocks": [
-                {
-                  "type": "section",
-                  "text": {
-                    "type": "mrkdwn",
-                    "text": "[FAILED] Sanity Checks :: <${{ steps.sha-info.outputs.RUN_URL }}|[Logs]> <${{ github.event.pull_request.html_url || github.event.head_commit.url }}|[Base]>\nCommit: <${{ steps.sha-info.outputs.COMMIT_URL }}|${{ steps.sha-info.outputs.SHA }}>"
-                  }
-                }
-              ]
-            }
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          msg: "[FAILED] Sanity Tests"
+          slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

@@ -3,14 +3,20 @@ run:

 linters:
   enable:
+    - errcheck
+    - forbidigo
     - gci
     - gofmt
     - gofumpt
-    - errcheck
-    - forbidigo
+    - gosimple
+    - govet
+    - ineffassign
     - lll
+    - loggercheck
     - misspell
     - revive
+    - unused
+    - usestdlibvars
     - wsl

   disable:

@@ -94,6 +94,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
 		flags.AddDisableDeltaFlag(c)
 		flags.AddEnableImmutableIDFlag(c)
 		flags.AddDisableConcurrencyLimiterFlag(c)
+		flags.AddDeltaPageSizeFlag(c)

 	case listCommand:
 		c, fs = utils.AddCommand(cmd, exchangeListCmd())
@@ -175,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {

 	sel := exchangeBackupCreateSelectors(flags.UserFV, flags.CategoryDataFV)

-	ins, err := utils.UsersMap(ctx, *acct, fault.New(true))
+	ins, err := utils.UsersMap(ctx, *acct, utils.Control(), fault.New(true))
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users"))
 	}

@@ -37,11 +37,11 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
 		expectRunE  func(*cobra.Command, []string) error
 	}{
 		{
-			"create exchange",
-			createCommand,
-			expectUse + " " + exchangeServiceCommandCreateUseSuffix,
-			exchangeCreateCmd().Short,
-			[]string{
+			name:        "create exchange",
+			use:         createCommand,
+			expectUse:   expectUse + " " + exchangeServiceCommandCreateUseSuffix,
+			expectShort: exchangeCreateCmd().Short,
+			flags: []string{
 				flags.UserFN,
 				flags.CategoryDataFN,
 				flags.DisableIncrementalsFN,
@@ -50,28 +50,29 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
 				flags.FetchParallelismFN,
 				flags.SkipReduceFN,
 				flags.NoStatsFN,
+				flags.DeltaPageSizeFN,
 			},
-			createExchangeCmd,
+			expectRunE: createExchangeCmd,
 		},
 		{
-			"list exchange",
-			listCommand,
-			expectUse,
-			exchangeListCmd().Short,
-			[]string{
+			name:        "list exchange",
+			use:         listCommand,
+			expectUse:   expectUse,
+			expectShort: exchangeListCmd().Short,
+			flags: []string{
 				flags.BackupFN,
 				flags.FailedItemsFN,
 				flags.SkippedItemsFN,
 				flags.RecoveredErrorsFN,
 			},
-			listExchangeCmd,
+			expectRunE: listExchangeCmd,
 		},
 		{
-			"details exchange",
-			detailsCommand,
-			expectUse + " " + exchangeServiceCommandDetailsUseSuffix,
-			exchangeDetailsCmd().Short,
-			[]string{
+			name:        "details exchange",
+			use:         detailsCommand,
+			expectUse:   expectUse + " " + exchangeServiceCommandDetailsUseSuffix,
+			expectShort: exchangeDetailsCmd().Short,
+			flags: []string{
 				flags.BackupFN,
 				flags.ContactFN,
 				flags.ContactFolderFN,
@@ -90,7 +91,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
 				flags.EventStartsBeforeFN,
 				flags.EventSubjectFN,
 			},
-			detailsExchangeCmd,
+			expectRunE: detailsExchangeCmd,
 		},
 		{
 			"delete exchange",

@@ -157,7 +157,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {

 	sel := oneDriveBackupCreateSelectors(flags.UserFV)

-	ins, err := utils.UsersMap(ctx, *acct, fault.New(true))
+	ins, err := utils.UsersMap(ctx, *acct, utils.Control(), fault.New(true))
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users"))
 	}

@@ -5,6 +5,7 @@ import (
 )

 const (
+	DeltaPageSizeFN             = "delta-page-size"
 	DisableConcurrencyLimiterFN = "disable-concurrency-limiter"
 	DisableDeltaFN              = "disable-delta"
 	DisableIncrementalsFN       = "disable-incrementals"
@@ -21,6 +22,7 @@ const (
 )

 var (
+	DeltaPageSizeFV             int
 	DisableConcurrencyLimiterFV bool
 	DisableDeltaFV              bool
 	DisableIncrementalsFV       bool
@@ -72,6 +74,18 @@ func AddSkipReduceFlag(cmd *cobra.Command) {
 	cobra.CheckErr(fs.MarkHidden(SkipReduceFN))
 }

+// AddDeltaPageSizeFlag adds a hidden flag that allows callers to reduce delta
+// query page sizes below 500.
+func AddDeltaPageSizeFlag(cmd *cobra.Command) {
+	fs := cmd.Flags()
+	fs.IntVar(
+		&DeltaPageSizeFV,
+		DeltaPageSizeFN,
+		500,
+		"Control quantity of items returned in paged queries. Valid range is [1-500]. Default: 500")
+	cobra.CheckErr(fs.MarkHidden(DeltaPageSizeFN))
+}
+
 // AddFetchParallelismFlag adds a hidden flag that allows callers to reduce call
 // parallelism (i.e., the corso worker pool size) from 4 to as low as 1.
 func AddFetchParallelismFlag(cmd *cobra.Command) {
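
A sketch of how the new hidden flag behaves once registered; the standalone command below is illustrative and not part of this commit:

package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/alcionai/corso/src/cli/flags"
)

func main() {
	cmd := &cobra.Command{
		Use: "backup",
		RunE: func(c *cobra.Command, args []string) error {
			// Hidden flags parse normally; they are only omitted from --help.
			fmt.Println("delta page size:", flags.DeltaPageSizeFV)
			return nil
		},
	}

	flags.AddDeltaPageSizeFlag(cmd)

	cmd.SetArgs([]string{"--delta-page-size", "250"})
	_ = cmd.Execute()
}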

@@ -14,6 +14,12 @@ func Control() control.Options {
 		opt.FailureHandling = control.FailFast
 	}

+	dps := int32(flags.DeltaPageSizeFV)
+	if dps > 500 || dps < 1 {
+		dps = 500
+	}
+
+	opt.DeltaPageSize = dps
 	opt.DisableMetrics = flags.NoStatsFV
 	opt.RestorePermissions = flags.RestorePermissionsFV
 	opt.SkipReduce = flags.SkipReduceFV
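
Note that Control() silently clamps out-of-range values back to the 500 default instead of erroring. A minimal sketch of that fallback, pulled into a standalone helper (the helper name is hypothetical; the commit inlines this logic):

// clampDeltaPageSize mirrors the fallback added to Control(): any value
// outside [1, 500] reverts to the 500 default rather than failing.
func clampDeltaPageSize(v int) int32 {
	dps := int32(v)
	if dps > 500 || dps < 1 {
		dps = 500
	}

	return dps
}

// clampDeltaPageSize(250) == 250 (in range, kept)
// clampDeltaPageSize(0)   == 500 (below range, reset)
// clampDeltaPageSize(999) == 500 (above range, reset)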

@@ -35,6 +35,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
 			assert.True(t, flags.SkipReduceFV, flags.SkipReduceFN)
 			assert.Equal(t, 2, flags.FetchParallelismFV, flags.FetchParallelismFN)
 			assert.True(t, flags.DisableConcurrencyLimiterFV, flags.DisableConcurrencyLimiterFN)
+			assert.Equal(t, 499, flags.DeltaPageSizeFV, flags.DeltaPageSizeFN)
 		},
 	}

@@ -48,6 +49,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
 	flags.AddSkipReduceFlag(cmd)
 	flags.AddFetchParallelismFlag(cmd)
 	flags.AddDisableConcurrencyLimiterFlag(cmd)
+	flags.AddDeltaPageSizeFlag(cmd)

 	// Test arg parsing for few args
 	cmd.SetArgs([]string{
@@ -60,6 +62,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
 		"--" + flags.SkipReduceFN,
 		"--" + flags.FetchParallelismFN, "2",
 		"--" + flags.DisableConcurrencyLimiterFN,
+		"--" + flags.DeltaPageSizeFN, "499",
 	})

 	err := cmd.Execute()

src/cli/utils/testdata/flags.go (vendored, 2 changed lines)
@@ -48,6 +48,8 @@ var (
 	Destination        = "destination"
 	RestorePermissions = true

+	DeltaPageSize = "deltaPageSize"
+
 	AzureClientID     = "testAzureClientId"
 	AzureTenantID     = "testAzureTenantId"
 	AzureClientSecret = "testAzureClientSecret"

@@ -7,6 +7,7 @@ import (

 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )
@@ -15,9 +16,10 @@ import (
 func UsersMap(
 	ctx context.Context,
 	acct account.Account,
+	co control.Options,
 	errs *fault.Bus,
 ) (idname.Cacher, error) {
-	au, err := makeUserAPI(acct)
+	au, err := makeUserAPI(acct, co)
 	if err != nil {
 		return nil, clues.Wrap(err, "constructing a graph client")
 	}
@@ -25,13 +27,13 @@ func UsersMap(
 	return au.GetAllIDsAndNames(ctx, errs)
 }

-func makeUserAPI(acct account.Account) (api.Users, error) {
+func makeUserAPI(acct account.Account, co control.Options) (api.Users, error) {
 	creds, err := acct.M365Config()
 	if err != nil {
 		return api.Users{}, clues.Wrap(err, "getting m365 account creds")
 	}

-	cli, err := api.NewClient(creds)
+	cli, err := api.NewClient(creds, co)
 	if err != nil {
 		return api.Users{}, clues.Wrap(err, "constructing api client")
 	}
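
Together with the CLI changes above, the new signature threads flag-derived options all the way down to the graph client. A minimal sketch of the updated call shape (package name is illustrative):

package sketch

import (
	"context"

	"github.com/alcionai/corso/src/cli/utils"
	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/fault"
)

// usersMapSketch shows the updated call shape: utils.Control() converts the
// parsed CLI flags (including the new delta-page-size) into control.Options,
// which UsersMap forwards down to api.NewClient.
func usersMapSketch(ctx context.Context, acct account.Account) (idname.Cacher, error) {
	return utils.UsersMap(ctx, acct, utils.Control(), fault.New(true))
}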

@@ -1,157 +0,0 @@
-// get_item.go is a source file designed to retrieve an m365 object from an
-// existing M365 account. Data displayed is representative of the current
-// serialization abstraction versioning used by Microsoft Graph and stored by Corso.
-
-package exchange
-
-import (
-	"context"
-	"fmt"
-	"os"
-
-	"github.com/alcionai/clues"
-	"github.com/microsoft/kiota-abstractions-go/serialization"
-	kw "github.com/microsoft/kiota-serialization-json-go"
-	"github.com/spf13/cobra"
-
-	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/common/ptr"
-	"github.com/alcionai/corso/src/internal/common/str"
-	"github.com/alcionai/corso/src/pkg/account"
-	"github.com/alcionai/corso/src/pkg/backup/details"
-	"github.com/alcionai/corso/src/pkg/credentials"
-	"github.com/alcionai/corso/src/pkg/fault"
-	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/services/m365/api"
-)
-
-// Required inputs from user for command execution
-var (
-	user, tenant, m365ID, category string
-)
-
-func AddCommands(parent *cobra.Command) {
-	exCmd := &cobra.Command{
-		Use:   "exchange",
-		Short: "Get an M365ID item JSON",
-		RunE:  handleExchangeCmd,
-	}
-
-	fs := exCmd.PersistentFlags()
-	fs.StringVar(&m365ID, "id", "", "m365 identifier for object")
-	fs.StringVar(&category, "category", "", "type of M365 data (contacts, email, events)")
-	fs.StringVar(&user, "user", "", "m365 user id of M365 user")
-	fs.StringVar(&tenant, "tenant", "", "m365 identifier for the tenant")
-
-	cobra.CheckErr(exCmd.MarkPersistentFlagRequired("user"))
-	cobra.CheckErr(exCmd.MarkPersistentFlagRequired("id"))
-	cobra.CheckErr(exCmd.MarkPersistentFlagRequired("category"))
-
-	parent.AddCommand(exCmd)
-}
-
-func handleExchangeCmd(cmd *cobra.Command, args []string) error {
-	if utils.HasNoFlagsAndShownHelp(cmd) {
-		return nil
-	}
-
-	tid := str.First(tenant, os.Getenv(account.AzureTenantID))
-
-	ctx := clues.Add(
-		cmd.Context(),
-		"item_id", m365ID,
-		"resource_owner", user,
-		"tenant", tid)
-
-	creds := account.M365Config{
-		M365:          credentials.GetM365(),
-		AzureTenantID: tid,
-	}
-
-	err := runDisplayM365JSON(ctx, creds, user, m365ID, fault.New(true))
-	if err != nil {
-		cmd.SilenceUsage = true
-		cmd.SilenceErrors = true
-
-		return clues.Wrap(err, "getting item")
-	}
-
-	return nil
-}
-
-func runDisplayM365JSON(
-	ctx context.Context,
-	creds account.M365Config,
-	user, itemID string,
-	errs *fault.Bus,
-) error {
-	var (
-		bs  []byte
-		err error
-		cat = path.ToCategoryType(category)
-		sw  = kw.NewJsonSerializationWriter()
-	)
-
-	ac, err := api.NewClient(creds)
-	if err != nil {
-		return err
-	}
-
-	switch cat {
-	case path.EmailCategory:
-		bs, err = getItem(ctx, ac.Mail(), user, itemID, true, errs)
-	case path.EventsCategory:
-		bs, err = getItem(ctx, ac.Events(), user, itemID, true, errs)
-	case path.ContactsCategory:
-		bs, err = getItem(ctx, ac.Contacts(), user, itemID, true, errs)
-	default:
-		return fmt.Errorf("unable to process category: %s", cat)
-	}
-
-	if err != nil {
-		return err
-	}
-
-	err = sw.WriteStringValue("", ptr.To(string(bs)))
-	if err != nil {
-		return clues.Wrap(err, "Error writing string value: "+itemID)
-	}
-
-	array, err := sw.GetSerializedContent()
-	if err != nil {
-		return clues.Wrap(err, "Error serializing item: "+itemID)
-	}
-
-	fmt.Println(string(array))
-
-	return nil
-}
-
-type itemer interface {
-	GetItem(
-		ctx context.Context,
-		user, itemID string,
-		immutableID bool,
-		errs *fault.Bus,
-	) (serialization.Parsable, *details.ExchangeInfo, error)
-	Serialize(
-		ctx context.Context,
-		item serialization.Parsable,
-		user, itemID string,
-	) ([]byte, error)
-}
-
-func getItem(
-	ctx context.Context,
-	itm itemer,
-	user, itemID string,
-	immutableIDs bool,
-	errs *fault.Bus,
-) ([]byte, error) {
-	sp, _, err := itm.GetItem(ctx, user, itemID, immutableIDs, errs)
-	if err != nil {
-		return nil, clues.Wrap(err, "getting item")
-	}
-
-	return itm.Serialize(ctx, sp, user, itemID)
-}
@@ -1,36 +0,0 @@
-package main
-
-import (
-	"context"
-	"os"
-
-	"github.com/spf13/cobra"
-
-	. "github.com/alcionai/corso/src/cli/print"
-	"github.com/alcionai/corso/src/cmd/getM365/exchange"
-	"github.com/alcionai/corso/src/cmd/getM365/onedrive"
-	"github.com/alcionai/corso/src/pkg/logger"
-)
-
-var rootCmd = &cobra.Command{
-	Use: "getM365",
-}
-
-func main() {
-	ls := logger.Settings{
-		Level:  logger.LLDebug,
-		Format: logger.LFText,
-	}
-	ctx, _ := logger.CtxOrSeed(context.Background(), ls)
-
-	ctx = SetRootCmd(ctx, rootCmd)
-	defer logger.Flush(ctx)
-
-	exchange.AddCommands(rootCmd)
-	onedrive.AddCommands(rootCmd)
-
-	if err := rootCmd.Execute(); err != nil {
-		Err(ctx, err)
-		os.Exit(1)
-	}
-}
@@ -1,207 +0,0 @@
-// get_item.go is a source file designed to retrieve an m365 object from an
-// existing M365 account. Data displayed is representative of the current
-// serialization abstraction versioning used by Microsoft Graph and stored by Corso.
-
-package onedrive
-
-import (
-	"context"
-	"encoding/json"
-	"io"
-	"net/http"
-	"os"
-
-	"github.com/alcionai/clues"
-	"github.com/microsoft/kiota-abstractions-go/serialization"
-	kjson "github.com/microsoft/kiota-serialization-json-go"
-	"github.com/microsoftgraph/msgraph-sdk-go/models"
-	"github.com/spf13/cobra"
-
-	. "github.com/alcionai/corso/src/cli/print"
-	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/common/ptr"
-	"github.com/alcionai/corso/src/internal/common/str"
-	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/pkg/account"
-	"github.com/alcionai/corso/src/pkg/credentials"
-	"github.com/alcionai/corso/src/pkg/services/m365/api"
-)
-
-const downloadURLKey = "@microsoft.graph.downloadUrl"
-
-// Required inputs from user for command execution
-var (
-	user, tenant, m365ID string
-)
-
-func AddCommands(parent *cobra.Command) {
-	exCmd := &cobra.Command{
-		Use:   "onedrive",
-		Short: "Get an M365ID item",
-		RunE:  handleOneDriveCmd,
-	}
-
-	fs := exCmd.PersistentFlags()
-	fs.StringVar(&m365ID, "id", "", "m365 identifier for object")
-	fs.StringVar(&user, "user", "", "m365 user id of M365 user")
-	fs.StringVar(&tenant, "tenant", "", "m365 identifier for the tenant")
-
-	cobra.CheckErr(exCmd.MarkPersistentFlagRequired("user"))
-	cobra.CheckErr(exCmd.MarkPersistentFlagRequired("id"))
-
-	parent.AddCommand(exCmd)
-}
-
-func handleOneDriveCmd(cmd *cobra.Command, args []string) error {
-	if utils.HasNoFlagsAndShownHelp(cmd) {
-		return nil
-	}
-
-	tid := str.First(tenant, os.Getenv(account.AzureTenantID))
-
-	ctx := clues.Add(
-		cmd.Context(),
-		"item_id", m365ID,
-		"resource_owner", user,
-		"tenant", tid)
-
-	// get account info
-	creds := account.M365Config{
-		M365:          credentials.GetM365(),
-		AzureTenantID: tid,
-	}
-
-	gr := graph.NewNoTimeoutHTTPWrapper()
-
-	ac, err := api.NewClient(creds)
-	if err != nil {
-		return Only(ctx, clues.Wrap(err, "getting api client"))
-	}
-
-	err = runDisplayM365JSON(ctx, ac, gr, creds, user, m365ID)
-	if err != nil {
-		cmd.SilenceUsage = true
-		cmd.SilenceErrors = true
-
-		return Only(ctx, clues.Wrap(err, "getting item"))
-	}
-
-	return nil
-}
-
-type itemData struct {
-	Size int `json:"size"`
-}
-
-type itemPrintable struct {
-	Info        json.RawMessage `json:"info"`
-	Permissions json.RawMessage `json:"permissions"`
-	Data        itemData        `json:"data"`
-}
-
-func (i itemPrintable) MinimumPrintable() any {
-	return i
-}
-
-func runDisplayM365JSON(
-	ctx context.Context,
-	ac api.Client,
-	gr graph.Requester,
-	creds account.M365Config,
-	userID, itemID string,
-) error {
-	drive, err := ac.Users().GetDefaultDrive(ctx, userID)
-	if err != nil {
-		return err
-	}
-
-	driveID := ptr.Val(drive.GetId())
-
-	it := itemPrintable{}
-
-	item, err := ac.Drives().GetItem(ctx, driveID, itemID)
-	if err != nil {
-		return err
-	}
-
-	if item != nil {
-		content, err := getDriveItemContent(ctx, gr, item)
-		if err != nil {
-			return err
-		}
-
-		// We could get size from item.GetSize(), but the
-		// getDriveItemContent call is to ensure that we are able to
-		// download the file.
-		it.Data.Size = len(content)
-	}
-
-	sInfo, err := serializeObject(item)
-	if err != nil {
-		return err
-	}
-
-	err = json.Unmarshal([]byte(sInfo), &it.Info)
-	if err != nil {
-		return err
-	}
-
-	perms, err := ac.Drives().GetItemPermission(ctx, driveID, itemID)
-	if err != nil {
-		return err
-	}
-
-	sPerms, err := serializeObject(perms)
-	if err != nil {
-		return err
-	}
-
-	err = json.Unmarshal([]byte(sPerms), &it.Permissions)
-	if err != nil {
-		return err
-	}
-
-	PrettyJSON(ctx, it)
-
-	return nil
-}
-
-func serializeObject(data serialization.Parsable) (string, error) {
-	sw := kjson.NewJsonSerializationWriter()
-
-	err := sw.WriteObjectValue("", data)
-	if err != nil {
-		return "", clues.Wrap(err, "writing serializing info")
-	}
-
-	content, err := sw.GetSerializedContent()
-	if err != nil {
-		return "", clues.Wrap(err, "getting serializing info")
-	}
-
-	return string(content), err
-}
-
-func getDriveItemContent(
-	ctx context.Context,
-	gr graph.Requester,
-	item models.DriveItemable,
-) ([]byte, error) {
-	url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
-	if !ok {
-		return nil, clues.New("retrieving download url")
-	}
-
-	resp, err := gr.Request(ctx, http.MethodGet, *url, nil, nil)
-	if err != nil {
-		return nil, clues.New("requesting item content").With("error", err)
-	}
-	defer resp.Body.Close()
-
-	content, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return nil, clues.New("reading item content").With("error", err)
-	}
-
-	return content, nil
-}

@@ -8,7 +8,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
 	github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.44.305
+	github.com/aws/aws-sdk-go v1.44.307
 	github.com/aws/aws-xray-sdk-go v1.8.1
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/google/uuid v1.3.0
@@ -16,7 +16,7 @@ require (
 	github.com/kopia/kopia v0.12.2-0.20230327171220-747baeebdab1
 	github.com/microsoft/kiota-abstractions-go v1.1.0
 	github.com/microsoft/kiota-authentication-azure-go v1.0.0
-	github.com/microsoft/kiota-http-go v1.0.0
+	github.com/microsoft/kiota-http-go v1.0.1
 	github.com/microsoft/kiota-serialization-form-go v1.0.0
 	github.com/microsoft/kiota-serialization-json-go v1.0.4
 	github.com/microsoftgraph/msgraph-sdk-go v1.12.0

@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.305 h1:fU/5lY3WyBjGU9fkmQYd8o4fZu+2RaOv/i+sPaJVvFg=
-github.com/aws/aws-sdk-go v1.44.305/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.307 h1:2R0/EPgpZcFSUwZhYImq/srjaOrOfLv5MNRzrFyAM38=
+github.com/aws/aws-sdk-go v1.44.307/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
 github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -275,8 +275,8 @@ github.com/microsoft/kiota-abstractions-go v1.1.0 h1:X1aKlsYCRs/0RSChr/fbq4j/+kx
 github.com/microsoft/kiota-abstractions-go v1.1.0/go.mod h1:RkxyZ5x87Njik7iVeQY9M2wtrrL1MJZcXiI/BxD/82g=
 github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk=
 github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw=
-github.com/microsoft/kiota-http-go v1.0.0 h1:F1hd6gMlLeEgH2CkRB7z13ow7LxMKMWEmms/t0VfS+k=
-github.com/microsoft/kiota-http-go v1.0.0/go.mod h1:eujxJliqodotsYepIc6ihhK+vXMMt5Q8YiSNL7+7M7U=
+github.com/microsoft/kiota-http-go v1.0.1 h1:818u3aiLpxj35hZgfUSqphQ18IUTK3gVdTE4cQ5vjLw=
+github.com/microsoft/kiota-http-go v1.0.1/go.mod h1:H0cg+ly+5ZSR8z4swj5ea9O/GB5ll2YuYeQ0/pJs7AY=
 github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI=
 github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA=
 github.com/microsoft/kiota-serialization-json-go v1.0.4 h1:5TaISWwd2Me8clrK7SqNATo0tv9seOq59y4I5953egQ=

@@ -82,8 +82,8 @@ var (
 	RudderStackDataPlaneURL string
 )

-func NewBus(ctx context.Context, s storage.Storage, tenID string, opts control.Options) (Bus, error) {
-	if opts.DisableMetrics {
+func NewBus(ctx context.Context, s storage.Storage, tenID string, co control.Options) (Bus, error) {
+	if co.DisableMetrics {
 		return Bus{}, nil
 	}

@@ -24,7 +24,7 @@ type BackupBases interface {
 	MergeBackupBases(
 		ctx context.Context,
 		other BackupBases,
-		reasonToKey func(Reason) string,
+		reasonToKey func(Reasoner) string,
 	) BackupBases
 }

@@ -109,10 +109,10 @@ func (bb *backupBases) ClearAssistBases() {
 // some migration that disrupts lookup), and that the BackupBases used to call
 // this function contains the current version.
 //
-// reasonToKey should be a function that, given a Reason, will produce some
-// string that represents Reason in the context of the merge operation. For
-// example, to merge BackupBases across a ResourceOwner migration, the Reason's
-// service and category can be used as the key.
+// reasonToKey should be a function that, given a Reasoner, will produce some
+// string that represents Reasoner in the context of the merge operation. For
+// example, to merge BackupBases across a ProtectedResource migration, the
+// Reasoner's service and category can be used as the key.
 //
 // Selection priority, for each reason key generated by reasonsToKey, follows
 // these rules:
@@ -125,7 +125,7 @@ func (bb *backupBases) ClearAssistBases() {
 func (bb *backupBases) MergeBackupBases(
 	ctx context.Context,
 	other BackupBases,
-	reasonToKey func(reason Reason) string,
+	reasonToKey func(reason Reasoner) string,
 ) BackupBases {
 	if other == nil || (len(other.MergeBases()) == 0 && len(other.AssistBases()) == 0) {
 		return bb
@@ -159,7 +159,7 @@ func (bb *backupBases) MergeBackupBases(

 	// Calculate the set of mergeBases to pull from other into this one.
 	for _, m := range other.MergeBases() {
-		useReasons := []Reason{}
+		useReasons := []Reasoner{}

 		for _, r := range m.Reasons {
 			k := reasonToKey(r)
@@ -210,7 +210,7 @@ func (bb *backupBases) MergeBackupBases(

 	// Add assistBases from other to this one as needed.
 	for _, m := range other.AssistBases() {
-		useReasons := []Reason{}
+		useReasons := []Reasoner{}

 		// Assume that all complete manifests in assist overlap with MergeBases.
 		if len(m.IncompleteReason) == 0 {
@@ -267,8 +267,8 @@ func findNonUniqueManifests(
 		}

 		for _, reason := range man.Reasons {
-			reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String()
-			reasons[reasonKey] = append(reasons[reasonKey], man)
+			mapKey := reasonKey(reason)
+			reasons[mapKey] = append(reasons[mapKey], man)
 		}
 	}

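
A note on reasonToKey: it decides which Reasoners count as equivalent during a merge. A sketch of the service-plus-category key described in the doc comment above (the same shape appears in the tests below):

// byServiceCategory treats two Reasoners as equivalent when they cover the
// same service and category, ignoring the protected resource. Keying this
// way lets merge bases carry across a protected-resource migration.
func byServiceCategory(r Reasoner) string {
	return r.Service().String() + r.Category().String()
}

// usage: merged := bb.MergeBackupBases(ctx, other, byServiceCategory)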

@@ -16,7 +16,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/path"
 )

-func makeManifest(id, incmpl, bID string, reasons ...Reason) ManifestEntry {
+func makeManifest(id, incmpl, bID string, reasons ...Reasoner) ManifestEntry {
 	bIDKey, _ := makeTagKV(TagBackupID)

 	return ManifestEntry{
@@ -223,14 +223,10 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
 			ir = "checkpoint"
 		}

-		reasons := make([]Reason, 0, len(i.cat))
+		reasons := make([]Reasoner, 0, len(i.cat))

 		for _, c := range i.cat {
-			reasons = append(reasons, Reason{
-				ResourceOwner: ro,
-				Service:       path.ExchangeService,
-				Category:      c,
-			})
+			reasons = append(reasons, NewReason("", ro, path.ExchangeService, c))
 		}

 		m := makeManifest(baseID, ir, "b"+baseID, reasons...)
@@ -457,8 +453,8 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
 			got := bb.MergeBackupBases(
 				ctx,
 				other,
-				func(reason Reason) string {
-					return reason.Service.String() + reason.Category.String()
+				func(r Reasoner) string {
+					return r.Service().String() + r.Category().String()
 				})
 			AssertBackupBasesEqual(t, expect, got)
 		})
@@ -469,13 +465,8 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
 	ro := "resource_owner"

 	makeMan := func(pct path.CategoryType, id, incmpl, bID string) ManifestEntry {
-		reason := Reason{
-			ResourceOwner: ro,
-			Service:       path.ExchangeService,
-			Category:      pct,
-		}
-
-		return makeManifest(id, incmpl, bID, reason)
+		r := NewReason("", ro, path.ExchangeService, pct)
+		return makeManifest(id, incmpl, bID, r)
 	}

 	// Make a function so tests can modify things without messing with each other.
@@ -606,11 +597,7 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
 		res := validMail1()
 		res.mergeBases[0].Reasons = append(
 			res.mergeBases[0].Reasons,
-			Reason{
-				ResourceOwner: ro,
-				Service:       path.ExchangeService,
-				Category:      path.ContactsCategory,
-			})
+			NewReason("", ro, path.ExchangeService, path.ContactsCategory))
 		res.assistBases = res.mergeBases

 		return res
@@ -619,11 +606,7 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
 		res := validMail1()
 		res.mergeBases[0].Reasons = append(
 			res.mergeBases[0].Reasons,
-			Reason{
-				ResourceOwner: ro,
-				Service:       path.ExchangeService,
-				Category:      path.ContactsCategory,
-			})
+			NewReason("", ro, path.ExchangeService, path.ContactsCategory))
 		res.assistBases = res.mergeBases

 		return res

@@ -29,39 +29,94 @@ const (
 	userTagPrefix = "tag:"
 )

-type Reason struct {
-	ResourceOwner string
-	Service       path.ServiceType
-	Category      path.CategoryType
-}
+// TODO(ashmrtn): Move this into some inject package. Here to avoid import
+// cycles.
+type Reasoner interface {
+	Tenant() string
+	ProtectedResource() string
+	Service() path.ServiceType
+	Category() path.CategoryType
+	// SubtreePath returns the path prefix for data in existing backups that have
+	// parameters (tenant, protected resource, etc) that match this Reasoner.
+	SubtreePath() (path.Path, error)
+}

-func (r Reason) TagKeys() []string {
+func NewReason(
+	tenant, resource string,
+	service path.ServiceType,
+	category path.CategoryType,
+) Reasoner {
+	return reason{
+		tenant:   tenant,
+		resource: resource,
+		service:  service,
+		category: category,
+	}
+}
+
+type reason struct {
+	// tenant appears here so that when this is moved to an inject package nothing
+	// needs to change. However, kopia itself is blind to the fields in the reason
+	// struct and relies on helper functions to get the information it needs.
+	tenant   string
+	resource string
+	service  path.ServiceType
+	category path.CategoryType
+}
+
+func (r reason) Tenant() string {
+	return r.tenant
+}
+
+func (r reason) ProtectedResource() string {
+	return r.resource
+}
+
+func (r reason) Service() path.ServiceType {
+	return r.service
+}
+
+func (r reason) Category() path.CategoryType {
+	return r.category
+}
+
+func (r reason) SubtreePath() (path.Path, error) {
+	p, err := path.ServicePrefix(
+		r.Tenant(),
+		r.ProtectedResource(),
+		r.Service(),
+		r.Category())
+
+	return p, clues.Wrap(err, "building path").OrNil()
+}
+
+func tagKeys(r Reasoner) []string {
 	return []string{
-		r.ResourceOwner,
-		serviceCatString(r.Service, r.Category),
+		r.ProtectedResource(),
+		serviceCatString(r.Service(), r.Category()),
 	}
 }

-// Key is the concatenation of the ResourceOwner, Service, and Category.
-func (r Reason) Key() string {
-	return r.ResourceOwner + r.Service.String() + r.Category.String()
+// reasonKey returns the concatenation of the ProtectedResource, Service, and Category.
+func reasonKey(r Reasoner) string {
+	return r.ProtectedResource() + r.Service().String() + r.Category().String()
 }

 type BackupEntry struct {
 	*backup.Backup
-	Reasons []Reason
+	Reasons []Reasoner
 }

 type ManifestEntry struct {
 	*snapshot.Manifest
-	// Reason contains the ResourceOwners and Service/Categories that caused this
+	// Reasons contains the ResourceOwners and Service/Categories that caused this
 	// snapshot to be selected as a base. We can't reuse OwnersCats here because
 	// it's possible some ResourceOwners will have a subset of the Categories as
 	// the reason for selecting a snapshot. For example:
 	//   1. backup user1 email,contacts -> B1
 	//   2. backup user1 contacts -> B2 (uses B1 as base)
 	//   3. backup user1 email,contacts,events (uses B1 for email, B2 for contacts)
-	Reasons []Reason
+	Reasons []Reasoner
 }

 func (me ManifestEntry) GetTag(key string) (string, bool) {
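
With the concrete struct now private, construction goes through NewReason and the former methods become package-level helpers. A minimal sketch of the new shapes, usable only inside this package since tagKeys and reasonKey are unexported (tenant and resource values are illustrative):

// reasonSketch exercises the constructor and helpers introduced above.
func reasonSketch() {
	r := NewReason("tenant-id", "user-id", path.ExchangeService, path.EmailCategory)

	_ = tagKeys(r)   // [protected resource, service+category tag]
	_ = reasonKey(r) // resource + service + category, used for de-duplication

	if p, err := r.SubtreePath(); err == nil {
		_ = p // path prefix locating this Reasoner's data in existing backups
	}
}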
@ -157,7 +212,7 @@ func (b *baseFinder) getBackupModel(
|
|||||||
// most recent complete backup as the base.
|
// most recent complete backup as the base.
|
||||||
func (b *baseFinder) findBasesInSet(
|
func (b *baseFinder) findBasesInSet(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
reason Reason,
|
reason Reasoner,
|
||||||
metas []*manifest.EntryMetadata,
|
metas []*manifest.EntryMetadata,
|
||||||
) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
|
) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
|
||||||
// Sort manifests by time so we can go through them sequentially. The code in
|
// Sort manifests by time so we can go through them sequentially. The code in
|
||||||
@ -190,7 +245,7 @@ func (b *baseFinder) findBasesInSet(
|
|||||||
|
|
||||||
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
|
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
|
||||||
Manifest: man,
|
Manifest: man,
|
||||||
Reasons: []Reason{reason},
|
Reasons: []Reasoner{reason},
|
||||||
})
|
})
|
||||||
|
|
||||||
logger.Ctx(ictx).Info("found incomplete backup")
|
logger.Ctx(ictx).Info("found incomplete backup")
|
||||||
@ -211,7 +266,7 @@ func (b *baseFinder) findBasesInSet(
|
|||||||
|
|
||||||
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
|
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
|
||||||
Manifest: man,
|
Manifest: man,
|
||||||
Reasons: []Reason{reason},
|
Reasons: []Reasoner{reason},
|
||||||
})
|
})
|
||||||
|
|
||||||
logger.Ctx(ictx).Info("found incomplete backup")
|
logger.Ctx(ictx).Info("found incomplete backup")
|
||||||
@ -235,7 +290,7 @@ func (b *baseFinder) findBasesInSet(
|
|||||||
|
|
||||||
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
|
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
|
||||||
Manifest: man,
|
Manifest: man,
|
||||||
Reasons: []Reason{reason},
|
Reasons: []Reasoner{reason},
|
||||||
})
|
})
|
||||||
|
|
||||||
logger.Ctx(ictx).Infow(
|
logger.Ctx(ictx).Infow(
|
||||||
@ -253,13 +308,13 @@ func (b *baseFinder) findBasesInSet(
|
|||||||
|
|
||||||
me := ManifestEntry{
|
me := ManifestEntry{
|
||||||
Manifest: man,
|
Manifest: man,
|
||||||
Reasons: []Reason{reason},
|
Reasons: []Reasoner{reason},
|
||||||
}
|
}
|
||||||
kopiaAssistSnaps = append(kopiaAssistSnaps, me)
|
kopiaAssistSnaps = append(kopiaAssistSnaps, me)
|
||||||
|
|
||||||
return &BackupEntry{
|
return &BackupEntry{
|
||||||
Backup: bup,
|
Backup: bup,
|
||||||
Reasons: []Reason{reason},
|
Reasons: []Reasoner{reason},
|
||||||
}, &me, kopiaAssistSnaps, nil
|
}, &me, kopiaAssistSnaps, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -270,12 +325,12 @@ func (b *baseFinder) findBasesInSet(
|
|||||||
|
|
||||||
func (b *baseFinder) getBase(
|
func (b *baseFinder) getBase(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
reason Reason,
|
r Reasoner,
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
|
) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
|
||||||
allTags := map[string]string{}
|
allTags := map[string]string{}
|
||||||
|
|
||||||
for _, k := range reason.TagKeys() {
|
for _, k := range tagKeys(r) {
|
||||||
allTags[k] = ""
|
allTags[k] = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -292,12 +347,12 @@ func (b *baseFinder) getBase(
|
|||||||
return nil, nil, nil, nil
|
return nil, nil, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.findBasesInSet(ctx, reason, metas)
|
return b.findBasesInSet(ctx, r, metas)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *baseFinder) FindBases(
|
func (b *baseFinder) FindBases(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
reasons []Reason,
|
reasons []Reasoner,
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
) BackupBases {
|
) BackupBases {
|
||||||
var (
|
var (
|
||||||
@ -310,14 +365,14 @@ func (b *baseFinder) FindBases(
|
|||||||
kopiaAssistSnaps = map[manifest.ID]ManifestEntry{}
|
kopiaAssistSnaps = map[manifest.ID]ManifestEntry{}
|
||||||
)
|
)
|
||||||
|
|
||||||
for _, reason := range reasons {
|
for _, searchReason := range reasons {
|
||||||
ictx := clues.Add(
|
ictx := clues.Add(
|
||||||
ctx,
|
ctx,
|
||||||
"search_service", reason.Service.String(),
|
"search_service", searchReason.Service().String(),
|
||||||
"search_category", reason.Category.String())
|
"search_category", searchReason.Category().String())
|
||||||
logger.Ctx(ictx).Info("searching for previous manifests")
|
logger.Ctx(ictx).Info("searching for previous manifests")
|
||||||
|
|
||||||
baseBackup, baseSnap, assistSnaps, err := b.getBase(ictx, reason, tags)
|
baseBackup, baseSnap, assistSnaps, err := b.getBase(ictx, searchReason, tags)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Ctx(ctx).Info(
|
logger.Ctx(ctx).Info(
|
||||||
"getting base, falling back to full backup for reason",
|
"getting base, falling back to full backup for reason",
|
||||||
|
|||||||
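For readers following the hunks above: the refactor replaces the concrete Reason struct (exported fields) with a Reasoner interface (accessor methods), so helpers like Key move out of the type and become free functions such as reasonKey and tagKeys. Below is a minimal, self-contained Go sketch of that shape; the NewReason constructor, the Tenant accessor, and the service/category types are stand-ins modeled on the diff, not the actual corso declarations.

package main

import "fmt"

// Stand-ins for path.ServiceType and path.CategoryType.
type ServiceType string
type CategoryType string

func (s ServiceType) String() string  { return string(s) }
func (c CategoryType) String() string { return string(c) }

// Reasoner mirrors the interface the diff migrates to: accessors
// replace the exported struct fields of the old Reason type.
type Reasoner interface {
	Tenant() string
	ProtectedResource() string
	Service() ServiceType
	Category() CategoryType
}

type reason struct {
	tenant, resource string
	service          ServiceType
	category         CategoryType
}

// NewReason mirrors the constructor used throughout the test hunks.
func NewReason(tenant, resource string, s ServiceType, c CategoryType) Reasoner {
	return reason{tenant: tenant, resource: resource, service: s, category: c}
}

func (r reason) Tenant() string            { return r.tenant }
func (r reason) ProtectedResource() string { return r.resource }
func (r reason) Service() ServiceType      { return r.service }
func (r reason) Category() CategoryType    { return r.category }

// reasonKey concatenates resource, service, and category,
// mirroring the reasonKey free function in the hunk above.
func reasonKey(r Reasoner) string {
	return r.ProtectedResource() + r.Service().String() + r.Category().String()
}

func main() {
	r := NewReason("tenant-id", "user1", "exchange", "email")
	fmt.Println(reasonKey(r)) // user1exchangeemail
}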
@ -39,61 +39,24 @@ var (
 	testUser2 = "user2"
 	testUser3 = "user3"

-	testAllUsersAllCats = []Reason{
-		{
-			ResourceOwner: testUser1,
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
-		{
-			ResourceOwner: testUser1,
-			Service:       path.ExchangeService,
-			Category:      path.EventsCategory,
-		},
-		{
-			ResourceOwner: testUser2,
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
-		{
-			ResourceOwner: testUser2,
-			Service:       path.ExchangeService,
-			Category:      path.EventsCategory,
-		},
-		{
-			ResourceOwner: testUser3,
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
-		{
-			ResourceOwner: testUser3,
-			Service:       path.ExchangeService,
-			Category:      path.EventsCategory,
-		},
+	testAllUsersAllCats = []Reasoner{
+		// User1 email and events.
+		NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
+		NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
+		// User2 email and events.
+		NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
+		NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
+		// User3 email and events.
+		NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
+		NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
 	}
-	testAllUsersMail = []Reason{
-		{
-			ResourceOwner: testUser1,
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
-		{
-			ResourceOwner: testUser2,
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
-		{
-			ResourceOwner: testUser3,
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
+	testAllUsersMail = []Reasoner{
+		NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
+		NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
+		NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
 	}
-	testUser1Mail = []Reason{
-		{
-			ResourceOwner: testUser1,
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
+	testUser1Mail = []Reasoner{
+		NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
 	}
 )

@ -322,12 +285,8 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() {
 		sm: mockEmptySnapshotManager{},
 		bg: mockEmptyModelGetter{},
 	}
-	reasons := []Reason{
-		{
-			ResourceOwner: "a-user",
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
+	reasons := []Reasoner{
+		NewReason("", "a-user", path.ExchangeService, path.EmailCategory),
 	}

 	bb := bf.FindBases(ctx, reasons, nil)
@ -345,12 +304,8 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
 		sm: &mockSnapshotManager{findErr: assert.AnError},
 		bg: mockEmptyModelGetter{},
 	}
-	reasons := []Reason{
-		{
-			ResourceOwner: "a-user",
-			Service:       path.ExchangeService,
-			Category:      path.EmailCategory,
-		},
+	reasons := []Reasoner{
+		NewReason("", "a-user", path.ExchangeService, path.EmailCategory),
 	}

 	bb := bf.FindBases(ctx, reasons, nil)
@ -361,14 +316,14 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
 func (suite *BaseFinderUnitSuite) TestGetBases() {
 	table := []struct {
 		name         string
-		input        []Reason
+		input        []Reasoner
 		manifestData []manifestInfo
 		// Use this to denote the Reasons a base backup or base manifest is
 		// selected. The int maps to the index of the backup or manifest in data.
-		expectedBaseReasons map[int][]Reason
+		expectedBaseReasons map[int][]Reasoner
 		// Use this to denote the Reasons a kopia assised incrementals manifest is
 		// selected. The int maps to the index of the manifest in data.
-		expectedAssistManifestReasons map[int][]Reason
+		expectedAssistManifestReasons map[int][]Reasoner
 		backupData []backupInfo
 	}{
 		{
@ -394,10 +349,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				1: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				1: testUser1Mail,
 			},
 			backupData: []backupInfo{
@ -428,10 +383,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				1: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 				1: testUser1Mail,
 			},
@ -463,10 +418,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				1: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 				1: testUser1Mail,
 			},
@ -492,10 +447,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser3,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
 			backupData: []backupInfo{
@ -519,10 +474,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser3,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				0: testAllUsersAllCats,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: testAllUsersAllCats,
 			},
 			backupData: []backupInfo{
@ -557,76 +512,28 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser3,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				0: {
-					{
-						ResourceOwner: testUser1,
-						Service:       path.ExchangeService,
-						Category:      path.EmailCategory,
-					},
-					{
-						ResourceOwner: testUser2,
-						Service:       path.ExchangeService,
-						Category:      path.EmailCategory,
-					},
-					{
-						ResourceOwner: testUser3,
-						Service:       path.ExchangeService,
-						Category:      path.EmailCategory,
-					},
+					NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
+					NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
+					NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
 				},
 				1: {
-					Reason{
-						ResourceOwner: testUser1,
-						Service:       path.ExchangeService,
-						Category:      path.EventsCategory,
-					},
-					Reason{
-						ResourceOwner: testUser2,
-						Service:       path.ExchangeService,
-						Category:      path.EventsCategory,
-					},
-					Reason{
-						ResourceOwner: testUser3,
-						Service:       path.ExchangeService,
-						Category:      path.EventsCategory,
-					},
+					NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
+					NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
+					NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
 				},
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: {
-					{
-						ResourceOwner: testUser1,
-						Service:       path.ExchangeService,
-						Category:      path.EmailCategory,
-					},
-					{
-						ResourceOwner: testUser2,
-						Service:       path.ExchangeService,
-						Category:      path.EmailCategory,
-					},
-					{
-						ResourceOwner: testUser3,
-						Service:       path.ExchangeService,
-						Category:      path.EmailCategory,
-					},
+					NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
+					NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
+					NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
 				},
 				1: {
-					Reason{
-						ResourceOwner: testUser1,
-						Service:       path.ExchangeService,
-						Category:      path.EventsCategory,
-					},
-					Reason{
-						ResourceOwner: testUser2,
-						Service:       path.ExchangeService,
-						Category:      path.EventsCategory,
-					},
-					Reason{
-						ResourceOwner: testUser3,
-						Service:       path.ExchangeService,
-						Category:      path.EventsCategory,
-					},
+					NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
+					NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
+					NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
 				},
 			},
 			backupData: []backupInfo{
@ -657,10 +564,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 				1: testUser1Mail,
 			},
@ -693,10 +600,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				1: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				1: testUser1Mail,
 			},
 			backupData: []backupInfo{
@ -728,8 +635,8 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{},
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				1: testUser1Mail,
 			},
 			backupData: []backupInfo{
@ -752,10 +659,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
 			backupData: []backupInfo{
@ -787,10 +694,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
 					testUser1,
 				),
 			},
-			expectedBaseReasons: map[int][]Reason{
+			expectedBaseReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
-			expectedAssistManifestReasons: map[int][]Reason{
+			expectedAssistManifestReasons: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
 			backupData: []backupInfo{
@ -857,17 +764,17 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {

 	table := []struct {
 		name  string
-		input []Reason
+		input []Reasoner
 		tags  map[string]string
 		// Use this to denote which manifests in data should be expected. Allows
 		// defining data in a table while not repeating things between data and
 		// expected.
-		expectedIdxs map[int][]Reason
+		expectedIdxs map[int][]Reasoner
 	}{
 		{
 			name: "no tags specified",
 			tags: nil,
-			expectedIdxs: map[int][]Reason{
+			expectedIdxs: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
 		},
@ -877,14 +784,14 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {
 				"fnords": "",
 				"smarf":  "",
 			},
-			expectedIdxs: map[int][]Reason{
+			expectedIdxs: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
 		},
 		{
 			name: "subset of custom tags",
 			tags: map[string]string{"fnords": ""},
-			expectedIdxs: map[int][]Reason{
+			expectedIdxs: map[int][]Reasoner{
 				0: testUser1Mail,
 			},
 		},
@ -925,7 +832,7 @@ func checkManifestEntriesMatch(
 	t *testing.T,
 	retSnaps []ManifestEntry,
 	allExpected []manifestInfo,
-	expectedIdxsAndReasons map[int][]Reason,
+	expectedIdxsAndReasons map[int][]Reasoner,
 ) {
 	// Check the proper snapshot manifests were returned.
 	expected := make([]*snapshot.Manifest, 0, len(expectedIdxsAndReasons))
@ -941,7 +848,7 @@ func checkManifestEntriesMatch(
 	assert.ElementsMatch(t, expected, got)

 	// Check the reasons for selecting each manifest are correct.
-	expectedReasons := make(map[manifest.ID][]Reason, len(expectedIdxsAndReasons))
+	expectedReasons := make(map[manifest.ID][]Reasoner, len(expectedIdxsAndReasons))
 	for idx, reasons := range expectedIdxsAndReasons {
 		expectedReasons[allExpected[idx].man.ID] = reasons
 	}
@ -967,7 +874,7 @@ func checkBackupEntriesMatch(
 	t *testing.T,
 	retBups []BackupEntry,
 	allExpected []backupInfo,
-	expectedIdxsAndReasons map[int][]Reason,
+	expectedIdxsAndReasons map[int][]Reasoner,
 ) {
 	// Check the proper snapshot manifests were returned.
 	expected := make([]*backup.Backup, 0, len(expectedIdxsAndReasons))
@ -983,7 +890,7 @@ func checkBackupEntriesMatch(
 	assert.ElementsMatch(t, expected, got)

 	// Check the reasons for selecting each manifest are correct.
-	expectedReasons := make(map[model.StableID][]Reason, len(expectedIdxsAndReasons))
+	expectedReasons := make(map[model.StableID][]Reasoner, len(expectedIdxsAndReasons))
 	for idx, reasons := range expectedIdxsAndReasons {
 		expectedReasons[allExpected[idx].b.ID] = reasons
 	}
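The check helpers above key expectations by index into the test data, then re-key them by stable ID before asserting. A stand-in sketch of that translation, with IDs and reasons simplified to strings (not the corso types):

package main

import "fmt"

// expectedByID mirrors the loop in checkManifestEntriesMatch: the
// table stores reasons per index, the assertion wants them per ID.
func expectedByID(ids []string, idxToReasons map[int][]string) map[string][]string {
	out := make(map[string][]string, len(idxToReasons))
	for idx, reasons := range idxToReasons {
		out[ids[idx]] = reasons // ids[idx] plays the role of man.ID / b.ID
	}
	return out
}

func main() {
	ids := []string{"snapA", "snapB"}
	fmt.Println(expectedByID(ids, map[int][]string{1: {"user1/email"}}))
	// map[snapB:[user1/email]]
}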
@ -12,12 +12,16 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 	"github.com/kopia/kopia/repo/compression"
 	"github.com/kopia/kopia/repo/content"
+	"github.com/kopia/kopia/repo/format"
+	"github.com/kopia/kopia/repo/maintenance"
 	"github.com/kopia/kopia/repo/manifest"
 	"github.com/kopia/kopia/snapshot"
 	"github.com/kopia/kopia/snapshot/policy"
 	"github.com/kopia/kopia/snapshot/snapshotfs"
 	"github.com/pkg/errors"

+	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/kopia/retention"
 	"github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/storage"
 )
@ -326,12 +330,12 @@ func updateCompressionOnPolicy(compressor string, p *policy.Policy) (bool, error
 	return true, nil
 }

-func updateRetentionOnPolicy(retention policy.RetentionPolicy, p *policy.Policy) bool {
-	if retention == p.RetentionPolicy {
+func updateRetentionOnPolicy(retPolicy policy.RetentionPolicy, p *policy.Policy) bool {
+	if retPolicy == p.RetentionPolicy {
 		return false
 	}

-	p.RetentionPolicy = retention
+	p.RetentionPolicy = retPolicy

 	return true
 }
@ -410,6 +414,118 @@ func checkCompressor(compressor compression.Name) error {
 	return clues.Stack(clues.New("unknown compressor type"), clues.New(string(compressor)))
 }

+func (w *conn) setRetentionParameters(
+	ctx context.Context,
+	rrOpts repository.Retention,
+) error {
+	if rrOpts.Mode == nil && rrOpts.Duration == nil && rrOpts.Extend == nil {
+		return nil
+	}
+
+	// Somewhat confusing case, when we have no retention but a non-zero duration
+	// it acts like we passed in only the duration and returns an error about
+	// having to set both. Return a clearer error here instead.
+	if ptr.Val(rrOpts.Mode) == repository.NoRetention && ptr.Val(rrOpts.Duration) != 0 {
+		return clues.New("duration must be 0 if rrOpts is disabled").WithClues(ctx)
+	}
+
+	dr, ok := w.Repository.(repo.DirectRepository)
+	if !ok {
+		return clues.New("getting handle to repo").WithClues(ctx)
+	}
+
+	blobCfg, params, err := getRetentionConfigs(ctx, dr)
+	if err != nil {
+		return clues.Stack(err)
+	}
+
+	opts := retention.OptsFromConfigs(*blobCfg, *params)
+	if err := opts.Set(rrOpts); err != nil {
+		return clues.Stack(err).WithClues(ctx)
+	}
+
+	return clues.Stack(persistRetentionConfigs(ctx, dr, opts)).OrNil()
+}
+
+func getRetentionConfigs(
+	ctx context.Context,
+	dr repo.DirectRepository,
+) (*format.BlobStorageConfiguration, *maintenance.Params, error) {
+	blobCfg, err := dr.FormatManager().BlobCfgBlob()
+	if err != nil {
+		return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx)
+	}
+
+	params, err := maintenance.GetParams(ctx, dr)
+	if err != nil {
+		return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx)
+	}
+
+	return &blobCfg, params, nil
+}
+
+func persistRetentionConfigs(
+	ctx context.Context,
+	dr repo.DirectRepository,
+	opts *retention.Opts,
+) error {
+	// Persist changes.
+	if !opts.BlobChanged() && !opts.ParamsChanged() {
+		return nil
+	}
+
+	blobCfg, params, err := opts.AsConfigs(ctx)
+	if err != nil {
+		return clues.Stack(err)
+	}
+
+	mp, err := dr.FormatManager().GetMutableParameters()
+	if err != nil {
+		return clues.Wrap(err, "getting mutable parameters").WithClues(ctx)
+	}
+
+	requiredFeatures, err := dr.FormatManager().RequiredFeatures()
+	if err != nil {
+		return clues.Wrap(err, "getting required features").WithClues(ctx)
+	}
+
+	// Must be the case that only blob changed.
+	if !opts.ParamsChanged() {
+		return clues.Wrap(
+			dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
+			"persisting storage config",
+		).WithClues(ctx).OrNil()
+	}
+
+	// Both blob and maintenance changed. A DirectWriteSession is required to
+	// update the maintenance config but not the blob config.
+	err = repo.DirectWriteSession(
+		ctx,
+		dr,
+		repo.WriteSessionOptions{
+			Purpose: "Corso immutable backups config",
+		},
+		func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
+			// Set the maintenance config first as we can bail out of the write
+			// session later.
+			if err := maintenance.SetParams(ctx, dw, &params); err != nil {
+				return clues.Wrap(err, "maintenance config").
+					WithClues(ctx)
+			}
+
+			if !opts.BlobChanged() {
+				return nil
+			}
+
+			return clues.Wrap(
+				dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
+				"storage config",
+			).WithClues(ctx).OrNil()
+		})
+
+	return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil()
+}
+
 func (w *conn) LoadSnapshot(
 	ctx context.Context,
 	id manifest.ID,
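One detail worth calling out in setRetentionParameters above: disabling retention while passing a non-zero duration is rejected up front with a clearer message than the error kopia would otherwise raise. A self-contained sketch of that guard; the Retention struct and the val helper are stand-ins modeled on corso's repository.Retention and ptr.Val, not the real declarations.

package main

import (
	"errors"
	"fmt"
	"time"
)

type RetentionMode int

const (
	UnknownRetention RetentionMode = iota
	NoRetention
	GovernanceRetention
	ComplianceRetention
)

// Retention is a stand-in for corso's repository.Retention option set:
// nil pointers mean "leave this setting unchanged".
type Retention struct {
	Mode     *RetentionMode
	Duration *time.Duration
	Extend   *bool
}

// val mirrors ptr.Val: dereference, or return the zero value for nil.
func val[T any](p *T) T {
	var zero T
	if p == nil {
		return zero
	}
	return *p
}

func validate(r Retention) error {
	// Nothing was requested; nothing to validate or persist.
	if r.Mode == nil && r.Duration == nil && r.Extend == nil {
		return nil
	}
	// Disabled retention combined with a non-zero duration is contradictory.
	if val(r.Mode) == NoRetention && val(r.Duration) != 0 {
		return errors.New("duration must be 0 if retention is disabled")
	}
	return nil
}

func main() {
	m, d := NoRetention, 24*time.Hour
	fmt.Println(validate(Retention{Mode: &m, Duration: &d}))
	// duration must be 0 if retention is disabled
}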
@ -15,7 +15,8 @@ type (
 	BackupConsumer interface {
 		ConsumeBackupCollections(
 			ctx context.Context,
-			bases []kopia.IncrementalBase,
+			backupReasons []kopia.Reasoner,
+			bases kopia.BackupBases,
 			cs []data.BackupCollection,
 			pmr prefixmatcher.StringSetReader,
 			tags map[string]string,
@ -37,7 +38,7 @@ type (
 	BaseFinder interface {
 		FindBases(
 			ctx context.Context,
-			reasons []kopia.Reason,
+			reasons []kopia.Reasoner,
 			tags map[string]string,
 		) kopia.BackupBases
 	}
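The interface change above means callers no longer hand kopia a flat []IncrementalBase; they pass the reasons driving the backup plus the BackupBases found for them. A rough caller-side sketch under stand-in types (the real ManifestEntry values are reduced to strings here; only the call order is taken from the hunk):

package sketch

import "context"

// Stand-ins reduced from the kopia package types named in the hunk.
type Reasoner interface{ Key() string }

type BackupBases interface {
	MergeBases() []string  // real code: []kopia.ManifestEntry
	AssistBases() []string // real code: []kopia.ManifestEntry
}

type BaseFinder interface {
	FindBases(ctx context.Context, reasons []Reasoner, tags map[string]string) BackupBases
}

// planBackup shows the new order of operations: find bases by reason,
// then pass both the reasons and the bases to the backup consumer.
func planBackup(ctx context.Context, bf BaseFinder, reasons []Reasoner) ([]string, []string) {
	bases := bf.FindBases(ctx, reasons, nil)
	// consumer.ConsumeBackupCollections(ctx, reasons, bases, ...)
	return bases.MergeBases(), bases.AssistBases()
}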
@ -70,7 +70,9 @@ func (mc *mergeCollection) Items(
 	for _, c := range mc.cols {
 		// Unfortunately doesn't seem to be a way right now to see if the
 		// iteration failed and we should be exiting early.
-		ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath)
+		ictx := clues.Add(
+			ctx,
+			"merged_collection_storage_path", path.LoggableDir(c.storagePath))
 		logger.Ctx(ictx).Debug("sending items from merged collection")

 		for item := range c.Items(ictx, errs) {
@ -95,7 +97,9 @@ func (mc *mergeCollection) FetchItemByName(
 		"merged_collection_count", len(mc.cols))

 	for _, c := range mc.cols {
-		ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath)
+		ictx := clues.Add(
+			ctx,
+			"merged_collection_storage_path", path.LoggableDir(c.storagePath))

 		logger.Ctx(ictx).Debug("looking for item in merged collection")

src/internal/kopia/retention/opts.go (new file, 139 lines)
@ -0,0 +1,139 @@
+package retention
+
+import (
+	"context"
+	"time"
+
+	"github.com/alcionai/clues"
+	"github.com/kopia/kopia/repo/blob"
+	"github.com/kopia/kopia/repo/format"
+	"github.com/kopia/kopia/repo/maintenance"
+
+	"github.com/alcionai/corso/src/pkg/control/repository"
+)
+
+type Opts struct {
+	blobCfg format.BlobStorageConfiguration
+	params  maintenance.Params
+
+	blobChanged   bool
+	paramsChanged bool
+}
+
+func NewOpts() *Opts {
+	return &Opts{}
+}
+
+func OptsFromConfigs(
+	blobCfg format.BlobStorageConfiguration,
+	params maintenance.Params,
+) *Opts {
+	return &Opts{
+		blobCfg: blobCfg,
+		params:  params,
+	}
+}
+
+func (r *Opts) AsConfigs(
+	ctx context.Context,
+) (format.BlobStorageConfiguration, maintenance.Params, error) {
+	// Check the new config is valid.
+	if r.blobCfg.IsRetentionEnabled() {
+		if err := maintenance.CheckExtendRetention(ctx, r.blobCfg, &r.params); err != nil {
+			return format.BlobStorageConfiguration{}, maintenance.Params{}, clues.Wrap(
+				err,
+				"invalid retention config",
+			).WithClues(ctx)
+		}
+	}
+
+	return r.blobCfg, r.params, nil
+}
+
+func (r *Opts) BlobChanged() bool {
+	return r.blobChanged
+}
+
+func (r *Opts) ParamsChanged() bool {
+	return r.paramsChanged
+}
+
+func (r *Opts) Set(opts repository.Retention) error {
+	r.setMaintenanceParams(opts.Extend)
+
+	return clues.Wrap(
+		r.setBlobConfigParams(opts.Mode, opts.Duration),
+		"setting mode or duration",
+	).OrNil()
+}
+
+func (r *Opts) setMaintenanceParams(extend *bool) {
+	if extend != nil && r.params.ExtendObjectLocks != *extend {
+		r.params.ExtendObjectLocks = *extend
+		r.paramsChanged = true
+	}
+}
+
+func (r *Opts) setBlobConfigParams(
+	mode *repository.RetentionMode,
+	duration *time.Duration,
+) error {
+	err := r.setBlobConfigMode(mode)
+	if err != nil {
+		return clues.Stack(err)
+	}
+
+	r.setBlobConfigDuration(duration)
+
+	return nil
+}
+
+func (r *Opts) setBlobConfigDuration(duration *time.Duration) {
+	if duration != nil && r.blobCfg.RetentionPeriod != *duration {
+		r.blobCfg.RetentionPeriod = *duration
+		r.blobChanged = true
+	}
+}
+
+func (r *Opts) setBlobConfigMode(
+	mode *repository.RetentionMode,
+) error {
+	if mode == nil {
+		return nil
+	}
+
+	startMode := r.blobCfg.RetentionMode
+
+	switch *mode {
+	case repository.NoRetention:
+		if !r.blobCfg.IsRetentionEnabled() {
+			return nil
+		}
+
+		r.blobCfg.RetentionMode = ""
+		r.blobCfg.RetentionPeriod = 0
+
+	case repository.GovernanceRetention:
+		r.blobCfg.RetentionMode = blob.Governance
+
+	case repository.ComplianceRetention:
+		r.blobCfg.RetentionMode = blob.Compliance
+
+	default:
+		return clues.New("unknown retention mode").
+			With("provided_retention_mode", mode.String())
+	}
+
+	// Only check if the retention mode is not empty. IsValid errors out if it's
+	// empty.
+	if len(r.blobCfg.RetentionMode) > 0 && !r.blobCfg.RetentionMode.IsValid() {
+		return clues.New("invalid retention mode").
+			With("retention_mode", r.blobCfg.RetentionMode)
+	}
+
+	// Take into account previous operations on r that could have already updated
+	// blobChanged.
+	r.blobChanged = r.blobChanged || startMode != r.blobCfg.RetentionMode
+
+	return nil
+}
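Putting the new file together: the intended flow is to seed Opts from the repository's current configs, apply the requested settings, then read back validated configs before persisting. A short usage sketch mirroring the unit tests that follow; the imports are those declared by the new files, and the zero-value configs stand in for a real repository's state.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/kopia/kopia/repo/format"
	"github.com/kopia/kopia/repo/maintenance"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/kopia/retention"
	"github.com/alcionai/corso/src/pkg/control/repository"
)

func main() {
	// Seed Opts from the repo's current blob and maintenance configs.
	opts := retention.OptsFromConfigs(
		format.BlobStorageConfiguration{},
		maintenance.Params{})

	// Apply the user-requested retention settings.
	err := opts.Set(repository.Retention{
		Mode:     ptr.To(repository.GovernanceRetention),
		Duration: ptr.To(48 * time.Hour),
		Extend:   ptr.To(true),
	})
	if err != nil {
		fmt.Println("set:", err)
		return
	}

	// AsConfigs validates the combination before anything is persisted.
	blobCfg, params, err := opts.AsConfigs(context.Background())
	fmt.Println(blobCfg.RetentionMode, params.ExtendObjectLocks, err,
		opts.BlobChanged(), opts.ParamsChanged())
}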
src/internal/kopia/retention/opts_test.go (new file, 204 lines)
@ -0,0 +1,204 @@
+package retention_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/alcionai/clues"
+	"github.com/kopia/kopia/repo/blob"
+	"github.com/kopia/kopia/repo/format"
+	"github.com/kopia/kopia/repo/maintenance"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/kopia/retention"
+	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/control/repository"
+)
+
+type OptsUnitSuite struct {
+	tester.Suite
+}
+
+func TestOptsUnitSuite(t *testing.T) {
+	suite.Run(t, &OptsUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *OptsUnitSuite) TestOptsFromConfigs() {
+	var (
+		t = suite.T()
+
+		mode     = blob.Governance
+		duration = time.Hour * 48
+		extend   = true
+
+		blobCfgInput = format.BlobStorageConfiguration{
+			RetentionMode:   mode,
+			RetentionPeriod: duration,
+		}
+		paramsInput = maintenance.Params{ExtendObjectLocks: extend}
+	)
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	opts := retention.OptsFromConfigs(blobCfgInput, paramsInput)
+
+	assert.False(t, opts.BlobChanged(), "BlobChanged")
+	assert.False(t, opts.ParamsChanged(), "ParamsChanged")
+
+	blobCfg, params, err := opts.AsConfigs(ctx)
+	require.NoError(t, err, "AsConfigs: %v", clues.ToCore(err))
+	assert.Equal(t, blobCfgInput, blobCfg)
+	assert.Equal(t, paramsInput, params)
+}
+
+func (suite *OptsUnitSuite) TestSet() {
+	var (
+		kopiaMode = blob.Governance
+		mode      = repository.GovernanceRetention
+		duration  = time.Hour * 48
+	)
+
+	table := []struct {
+		name                string
+		inputBlob           format.BlobStorageConfiguration
+		inputParams         maintenance.Params
+		ctrlOpts            repository.Retention
+		setErr              require.ErrorAssertionFunc
+		expectMode          blob.RetentionMode
+		expectDuration      time.Duration
+		expectExtend        bool
+		expectBlobChanged   bool
+		expectParamsChanged bool
+	}{
+		{
+			name:   "All Nils",
+			setErr: require.NoError,
+		},
+		{
+			name: "All Off",
+			ctrlOpts: repository.Retention{
+				Mode:     ptr.To(repository.NoRetention),
+				Duration: ptr.To(time.Duration(0)),
+				Extend:   ptr.To(false),
+			},
+			setErr: require.NoError,
+		},
+		{
+			name: "UnknownRetention",
+			ctrlOpts: repository.Retention{
+				Mode:     ptr.To(repository.UnknownRetention),
+				Duration: ptr.To(duration),
+			},
+			setErr: require.Error,
+		},
+		{
+			name: "Invalid Retention Mode",
+			ctrlOpts: repository.Retention{
+				Mode:     ptr.To(repository.RetentionMode(-1)),
+				Duration: ptr.To(duration),
+			},
+			setErr: require.Error,
+		},
+		{
+			name: "Valid Set All",
+			ctrlOpts: repository.Retention{
+				Mode:     ptr.To(mode),
+				Duration: ptr.To(duration),
+				Extend:   ptr.To(true),
+			},
+			setErr:              require.NoError,
+			expectMode:          kopiaMode,
+			expectDuration:      duration,
+			expectExtend:        true,
+			expectBlobChanged:   true,
+			expectParamsChanged: true,
+		},
+		{
+			name: "Valid Set BlobConfig",
+			ctrlOpts: repository.Retention{
+				Mode:     ptr.To(mode),
+				Duration: ptr.To(duration),
+			},
+			setErr:            require.NoError,
+			expectMode:        kopiaMode,
+			expectDuration:    duration,
+			expectBlobChanged: true,
+		},
+		{
+			name: "Valid Set Params",
+			ctrlOpts: repository.Retention{
+				Extend: ptr.To(true),
+			},
+			setErr:              require.NoError,
+			expectExtend:        true,
+			expectParamsChanged: true,
+		},
+		{
+			name: "Partial BlobConfig Change",
+			inputBlob: format.BlobStorageConfiguration{
+				RetentionMode:   kopiaMode,
+				RetentionPeriod: duration,
+			},
+			ctrlOpts: repository.Retention{
+				Duration: ptr.To(duration + time.Hour),
+			},
+			setErr:            require.NoError,
+			expectMode:        kopiaMode,
+			expectDuration:    duration + time.Hour,
+			expectBlobChanged: true,
+		},
+		{
+			name: "No BlobConfig Change",
+			inputBlob: format.BlobStorageConfiguration{
+				RetentionMode:   kopiaMode,
+				RetentionPeriod: duration,
+			},
+			ctrlOpts: repository.Retention{
+				Mode:     ptr.To(mode),
+				Duration: ptr.To(duration),
+			},
+			setErr:         require.NoError,
+			expectMode:     kopiaMode,
+			expectDuration: duration,
+		},
+		{
+			name:        "No Params Change",
+			inputParams: maintenance.Params{ExtendObjectLocks: true},
+			ctrlOpts: repository.Retention{
+				Extend: ptr.To(true),
+			},
+			setErr:       require.NoError,
+			expectExtend: true,
+		},
+	}
+
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			ctx, flush := tester.NewContext(t)
+			defer flush()
+
+			opts := retention.OptsFromConfigs(test.inputBlob, test.inputParams)
+			err := opts.Set(test.ctrlOpts)
+			test.setErr(t, err, "setting params: %v", clues.ToCore(err))
+
+			if err != nil {
+				return
+			}
+
+			blobCfg, params, err := opts.AsConfigs(ctx)
+			require.NoError(t, err, "getting configs: %v", clues.ToCore(err))
+
+			assert.Equal(t, test.expectMode, blobCfg.RetentionMode, "mode")
+			assert.Equal(t, test.expectDuration, blobCfg.RetentionPeriod, "duration")
+			assert.Equal(t, test.expectExtend, params.ExtendObjectLocks, "extend locks")
+			assert.Equal(t, test.expectBlobChanged, opts.BlobChanged(), "blob changed")
+			assert.Equal(t, test.expectParamsChanged, opts.ParamsChanged(), "params changed")
+		})
+	}
+}
@ -20,6 +20,7 @@ import (
 	"github.com/kopia/kopia/fs/virtualfs"
 	"github.com/kopia/kopia/repo/manifest"
 	"github.com/kopia/kopia/snapshot/snapshotfs"
+	"golang.org/x/exp/maps"

 	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/data"
@ -970,10 +971,32 @@ func traverseBaseDir(
 	return nil
 }

+func logBaseInfo(ctx context.Context, m ManifestEntry) {
+	svcs := map[string]struct{}{}
+	cats := map[string]struct{}{}
+
+	for _, r := range m.Reasons {
+		svcs[r.Service().String()] = struct{}{}
+		cats[r.Category().String()] = struct{}{}
+	}
+
+	mbID, _ := m.GetTag(TagBackupID)
+	if len(mbID) == 0 {
+		mbID = "no_backup_id_tag"
+	}
+
+	logger.Ctx(ctx).Infow(
+		"using base for backup",
+		"base_snapshot_id", m.ID,
+		"services", maps.Keys(svcs),
+		"categories", maps.Keys(cats),
+		"base_backup_id", mbID)
+}
+
 func inflateBaseTree(
 	ctx context.Context,
 	loader snapshotLoader,
-	snap IncrementalBase,
+	snap ManifestEntry,
 	updatedPaths map[string]path.Path,
 	roots map[string]*treeMap,
 ) error {
@ -996,13 +1019,25 @@ func inflateBaseTree(
 		return clues.New("snapshot root is not a directory").WithClues(ctx)
 	}

+	// Some logging to help track things.
+	logBaseInfo(ctx, snap)
+
 	// For each subtree corresponding to the tuple
 	// (resource owner, service, category) merge the directories in the base with
 	// what has been reported in the collections we got.
-	for _, subtreePath := range snap.SubtreePaths {
+	for _, r := range snap.Reasons {
+		ictx := clues.Add(
+			ctx,
+			"subtree_service", r.Service().String(),
+			"subtree_category", r.Category().String())
+
+		subtreePath, err := r.SubtreePath()
+		if err != nil {
+			return clues.Wrap(err, "building subtree path").WithClues(ictx)
+		}
+
 		// We're starting from the root directory so don't need it in the path.
 		pathElems := encodeElements(subtreePath.PopFront().Elements()...)
-		ictx := clues.Add(ctx, "subtree_path", subtreePath)

 		ent, err := snapshotfs.GetNestedEntry(ictx, dir, pathElems)
 		if err != nil {
@ -1022,7 +1057,7 @@ func inflateBaseTree(
 		// This ensures that a migration on the directory prefix can complete.
 		// The prefix is the tenant/service/owner/category set, which remains
 		// otherwise unchecked in tree inflation below this point.
-		newSubtreePath := subtreePath
+		newSubtreePath := subtreePath.ToBuilder()
 		if p, ok := updatedPaths[subtreePath.String()]; ok {
 			newSubtreePath = p.ToBuilder()
 		}
@ -1031,7 +1066,7 @@ func inflateBaseTree(
 			ictx,
 			0,
 			updatedPaths,
-			subtreePath.Dir(),
+			subtreePath.ToBuilder().Dir(),
 			newSubtreePath.Dir(),
 			subtreeDir,
 			roots,
@ -1059,7 +1094,7 @@ func inflateBaseTree(
 func inflateDirTree(
 	ctx context.Context,
 	loader snapshotLoader,
-	baseSnaps []IncrementalBase,
+	baseSnaps []ManifestEntry,
 	collections []data.BackupCollection,
 	globalExcludeSet prefixmatcher.StringSetReader,
 	progress *corsoProgress,
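In the inflateBaseTree hunk above, per-base subtree paths are no longer carried on the entry; each Reasoner derives its own via SubtreePath(). Judging by the old mockIncrementalBase construction (removed in the test hunk below), that path is the tenant/service/resource-owner/category tuple. A stand-in sketch of that mapping; the builder type here is a simplification of corso's path.Builder, and the internals of SubtreePath are presumed, not confirmed by the diff.

package main

import (
	"fmt"
	"strings"
)

// builder is a stand-in for path.Builder: it only appends and joins.
type builder struct{ elems []string }

func (b builder) Append(es ...string) builder {
	return builder{elems: append(append([]string{}, b.elems...), es...)}
}

func (b builder) String() string { return strings.Join(b.elems, "/") }

// subtreePath shows the tuple the diff iterates per reason, matching
// the old path.Builder{}.Append(tenant, service, owner, category) call.
func subtreePath(tenant, service, owner, category string) builder {
	return builder{}.Append(tenant, service, owner, category)
}

func main() {
	fmt.Println(subtreePath("tenant-id", "exchange", "user1", "email"))
	// tenant-id/exchange/user1/email
}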
@ -946,21 +946,22 @@ func (msw *mockSnapshotWalker) SnapshotRoot(*snapshot.Manifest) (fs.Entry, error
 	return msw.snapshotRoot, nil
 }

-func mockIncrementalBase(
+func makeManifestEntry(
 	id, tenant, resourceOwner string,
 	service path.ServiceType,
 	categories ...path.CategoryType,
-) IncrementalBase {
-	stps := []*path.Builder{}
+) ManifestEntry {
+	var reasons []Reasoner

 	for _, c := range categories {
-		stps = append(stps, path.Builder{}.Append(tenant, service.String(), resourceOwner, c.String()))
+		reasons = append(reasons, NewReason(tenant, resourceOwner, service, c))
 	}

-	return IncrementalBase{
+	return ManifestEntry{
 		Manifest: &snapshot.Manifest{
 			ID: manifest.ID(id),
 		},
-		SubtreePaths: stps,
+		Reasons: reasons,
 	}
 }

@ -1331,8 +1332,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
 	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
-		[]IncrementalBase{
-			mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
+		[]ManifestEntry{
+			makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		test.inputCollections(),
 		pmMock.NewPrefixMap(nil),
@ -2260,8 +2261,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
 	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
-		[]IncrementalBase{
-			mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
+		[]ManifestEntry{
+			makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		test.inputCollections(t),
 		ie,
@ -2425,8 +2426,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
 	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
-		[]IncrementalBase{
-			mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
+		[]ManifestEntry{
+			makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		collections,
 		pmMock.NewPrefixMap(nil),
@ -2531,8 +2532,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase()
 	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
-		[]IncrementalBase{
-			mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
+		[]ManifestEntry{
+			makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		collections,
 		pmMock.NewPrefixMap(nil),
@ -2782,9 +2783,9 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
 	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
-		[]IncrementalBase{
-			mockIncrementalBase("id1", testTenant, testUser, path.ExchangeService, path.ContactsCategory),
-			mockIncrementalBase("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory),
+		[]ManifestEntry{
+			makeManifestEntry("id1", testTenant, testUser, path.ExchangeService, path.ContactsCategory),
+			makeManifestEntry("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		collections,
 		pmMock.NewPrefixMap(nil),
@ -2948,8 +2949,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
 	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
-		[]IncrementalBase{
-			mockIncrementalBase("id1", testTenant, testUser, path.ExchangeService, path.EmailCategory, path.ContactsCategory),
+		[]ManifestEntry{
+			makeManifestEntry("id1", testTenant, testUser, path.ExchangeService, path.EmailCategory, path.ContactsCategory),
 		},
 		[]data.BackupCollection{mce, mcc},
 		pmMock.NewPrefixMap(nil),
@ -4,22 +4,19 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alcionai/clues"
|
"github.com/alcionai/clues"
|
||||||
"github.com/kopia/kopia/fs"
|
"github.com/kopia/kopia/fs"
|
||||||
"github.com/kopia/kopia/repo"
|
"github.com/kopia/kopia/repo"
|
||||||
"github.com/kopia/kopia/repo/blob"
|
|
||||||
"github.com/kopia/kopia/repo/format"
|
|
||||||
"github.com/kopia/kopia/repo/maintenance"
|
"github.com/kopia/kopia/repo/maintenance"
|
||||||
"github.com/kopia/kopia/repo/manifest"
|
"github.com/kopia/kopia/repo/manifest"
|
||||||
"github.com/kopia/kopia/snapshot"
|
"github.com/kopia/kopia/snapshot"
|
||||||
"github.com/kopia/kopia/snapshot/policy"
|
"github.com/kopia/kopia/snapshot/policy"
|
||||||
"github.com/kopia/kopia/snapshot/snapshotfs"
|
"github.com/kopia/kopia/snapshot/snapshotfs"
|
||||||
"github.com/kopia/kopia/snapshot/snapshotmaintenance"
|
"github.com/kopia/kopia/snapshot/snapshotmaintenance"
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
|
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
|
||||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/diagnostics"
|
"github.com/alcionai/corso/src/internal/diagnostics"
|
||||||
"github.com/alcionai/corso/src/internal/observe"
|
"github.com/alcionai/corso/src/internal/observe"
|
||||||
@ -132,11 +129,6 @@ func (w *Wrapper) Close(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type IncrementalBase struct {
|
|
||||||
*snapshot.Manifest
|
|
||||||
SubtreePaths []*path.Builder
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConsumeBackupCollections takes a set of collections and creates a kopia snapshot
|
// ConsumeBackupCollections takes a set of collections and creates a kopia snapshot
|
||||||
// with the data that they contain. previousSnapshots is used for incremental
|
// with the data that they contain. previousSnapshots is used for incremental
|
||||||
// backups and should represent the base snapshot from which metadata is sourced
|
// backups and should represent the base snapshot from which metadata is sourced
|
||||||
@ -145,10 +137,11 @@ type IncrementalBase struct {
|
|||||||
// complete backup of all data.
|
// complete backup of all data.
|
||||||
func (w Wrapper) ConsumeBackupCollections(
|
func (w Wrapper) ConsumeBackupCollections(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
previousSnapshots []IncrementalBase,
|
backupReasons []Reasoner,
|
||||||
|
bases BackupBases,
|
||||||
collections []data.BackupCollection,
|
collections []data.BackupCollection,
|
||||||
globalExcludeSet prefixmatcher.StringSetReader,
|
globalExcludeSet prefixmatcher.StringSetReader,
|
||||||
tags map[string]string,
|
additionalTags map[string]string,
|
||||||
buildTreeWithBase bool,
|
buildTreeWithBase bool,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
|
) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
|
||||||
@ -174,15 +167,23 @@ func (w Wrapper) ConsumeBackupCollections(
|
|||||||
// When running an incremental backup, we need to pass the prior
|
// When running an incremental backup, we need to pass the prior
|
||||||
// snapshot bases into inflateDirTree so that the new snapshot
|
// snapshot bases into inflateDirTree so that the new snapshot
|
||||||
// includes historical data.
|
// includes historical data.
|
||||||
var base []IncrementalBase
|
var (
|
||||||
|
mergeBase []ManifestEntry
|
||||||
|
assistBase []ManifestEntry
|
||||||
|
)
|
||||||
|
|
||||||
|
if bases != nil {
|
||||||
if buildTreeWithBase {
|
if buildTreeWithBase {
|
||||||
base = previousSnapshots
|
mergeBase = bases.MergeBases()
|
||||||
|
}
|
||||||
|
|
||||||
|
assistBase = bases.AssistBases()
|
||||||
}
|
}
|
||||||
|
|
||||||
dirTree, err := inflateDirTree(
|
dirTree, err := inflateDirTree(
|
||||||
ctx,
|
ctx,
|
||||||
w.c,
|
w.c,
|
||||||
base,
|
mergeBase,
|
||||||
collections,
|
collections,
|
||||||
globalExcludeSet,
|
globalExcludeSet,
|
||||||
progress)
|
progress)
|
||||||
@ -190,9 +191,22 @@ func (w Wrapper) ConsumeBackupCollections(
 		return nil, nil, nil, clues.Wrap(err, "building kopia directories")
 	}
 
+	// Add some extra tags so we can look things up by reason.
+	tags := maps.Clone(additionalTags)
+	if tags == nil {
+		// Some platforms seem to return nil if the input is nil.
+		tags = map[string]string{}
+	}
+
+	for _, r := range backupReasons {
+		for _, k := range tagKeys(r) {
+			tags[k] = ""
+		}
+	}
+
 	s, err := w.makeSnapshotWithRoot(
 		ctx,
-		previousSnapshots,
+		assistBase,
 		dirTree,
 		tags,
 		progress)
@ -205,7 +219,7 @@ func (w Wrapper) ConsumeBackupCollections(
 
 func (w Wrapper) makeSnapshotWithRoot(
 	ctx context.Context,
-	prevSnapEntries []IncrementalBase,
+	prevSnapEntries []ManifestEntry,
 	root fs.Directory,
 	addlTags map[string]string,
 	progress *corsoProgress,
@ -225,8 +239,8 @@ func (w Wrapper) makeSnapshotWithRoot(
 
 	ctx = clues.Add(
 		ctx,
-		"len_prev_base_snapshots", len(prevSnapEntries),
-		"assist_snap_ids", snapIDs,
+		"num_assist_snapshots", len(prevSnapEntries),
+		"assist_snapshot_ids", snapIDs,
 		"additional_tags", addlTags)
 
 	if len(snapIDs) > 0 {
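Taken together, the hunks above change the consumer-facing call shape: callers now pass the backup Reasoners plus a BackupBases container instead of pre-flattened []IncrementalBase values. A minimal caller-side sketch in Go (illustrative only; the tenant/owner values and the collections/bases construction are assumptions, not part of this diff):

reasons := []Reasoner{
	NewReason(tenant, owner, path.ExchangeService, path.EmailCategory),
}

// bases may be nil, in which case the wrapper skips merge/assist bases
// and performs a full, non-incremental backup of the collections.
stats, deets, mergeInfo, err := w.ConsumeBackupCollections(
	ctx,
	reasons,
	bases,
	collections,
	nil,  // no global exclude set
	nil,  // no additional tags; per-reason tag keys are added internally
	true, // buildTreeWithBase: hydrate the tree from MergeBases()
	fault.New(true))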
@ -722,202 +736,5 @@ func (w *Wrapper) SetRetentionParameters(
 	ctx context.Context,
 	retention repository.Retention,
 ) error {
-	if retention.Mode == nil && retention.Duration == nil && retention.Extend == nil {
-		return nil
-	}
-
-	// Somewhat confusing case, when we have no retention but a non-zero duration
-	// it acts like we passed in only the duration and returns an error about
-	// having to set both. Return a clearer error here instead. Check if mode is
-	// set so we still allow changing duration if mode is already set.
-	if m, ok := ptr.ValOK(retention.Mode); ok && m == repository.NoRetention && ptr.Val(retention.Duration) != 0 {
-		return clues.New("duration must be 0 if retention is disabled").WithClues(ctx)
-	}
-
-	dr, ok := w.c.Repository.(repo.DirectRepository)
-	if !ok {
-		return clues.New("getting handle to repo").WithClues(ctx)
-	}
-
-	blobCfg, params, err := getRetentionConfigs(ctx, dr)
-	if err != nil {
-		return clues.Stack(err)
-	}
-
-	// Update blob config information.
-	blobChanged, err := w.setBlobConfigParams(retention.Mode, retention.Duration, blobCfg)
-	if err != nil {
-		return clues.Wrap(err, "setting retention mode or duration").WithClues(ctx)
-	}
-
-	// Update maintenance config information.
-	var maintenanceChanged bool
-
-	if retention.Extend != nil && params.ExtendObjectLocks != *retention.Extend {
-		params.ExtendObjectLocks = *retention.Extend
-		maintenanceChanged = true
-	}
-
-	// Check the new config is valid.
-	if blobCfg.IsRetentionEnabled() {
-		if err := maintenance.CheckExtendRetention(ctx, *blobCfg, params); err != nil {
-			return clues.Wrap(err, "invalid retention config").WithClues(ctx)
-		}
-	}
-
-	return clues.Stack(persistRetentionConfigs(
-		ctx,
-		dr,
-		blobCfg,
-		blobChanged,
-		params,
-		maintenanceChanged,
-	)).OrNil()
-}
-
-func getRetentionConfigs(
-	ctx context.Context,
-	dr repo.DirectRepository,
-) (*format.BlobStorageConfiguration, *maintenance.Params, error) {
-	blobCfg, err := dr.FormatManager().BlobCfgBlob()
-	if err != nil {
-		return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx)
-	}
-
-	params, err := maintenance.GetParams(ctx, dr)
-	if err != nil {
-		return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx)
-	}
-
-	return &blobCfg, params, nil
-}
-
-func persistRetentionConfigs(
-	ctx context.Context,
-	dr repo.DirectRepository,
-	blobCfg *format.BlobStorageConfiguration,
-	blobChanged bool,
-	params *maintenance.Params,
-	maintenanceChanged bool,
-) error {
-	// Persist changes.
-	if !blobChanged && !maintenanceChanged {
-		return nil
-	}
-
-	mp, err := dr.FormatManager().GetMutableParameters()
-	if err != nil {
-		return clues.Wrap(err, "getting mutable parameters")
-	}
-
-	requiredFeatures, err := dr.FormatManager().RequiredFeatures()
-	if err != nil {
-		return clues.Wrap(err, "getting required features").WithClues(ctx)
-	}
-
-	// Must be the case that only blob changed.
-	if !maintenanceChanged {
-		return clues.Wrap(
-			dr.FormatManager().SetParameters(ctx, mp, *blobCfg, requiredFeatures),
-			"persisting storage config",
-		).WithClues(ctx).OrNil()
-	}
-
-	// Both blob and maintenance changed. A DirectWriteSession is required to
-	// update the maintenance config but not the blob config.
-	err = repo.DirectWriteSession(
-		ctx,
-		dr,
-		repo.WriteSessionOptions{
-			Purpose: "Corso immutable backups config",
-		},
-		func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
-			// Set the maintenance config first as we can bail out of the write
-			// session later.
-			if err := maintenance.SetParams(ctx, dw, params); err != nil {
-				return clues.Wrap(err, "maintenance config").
-					WithClues(ctx)
-			}
-
-			if !blobChanged {
-				return nil
-			}
-
-			return clues.Wrap(
-				dr.FormatManager().SetParameters(ctx, mp, *blobCfg, requiredFeatures),
-				"storage config",
-			).WithClues(ctx).OrNil()
-		})
-
-	return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil()
-}
-
-func (w Wrapper) setBlobConfigParams(
-	mode *repository.RetentionMode,
-	duration *time.Duration,
-	blobCfg *format.BlobStorageConfiguration,
-) (bool, error) {
-	changed, err := setBlobConfigMode(mode, blobCfg)
-	if err != nil {
-		return false, clues.Stack(err)
-	}
-
-	tmp := setBlobConfigDuration(duration, blobCfg)
-	changed = changed || tmp
-
-	return changed, nil
-}
-
-func setBlobConfigDuration(
-	duration *time.Duration,
-	blobCfg *format.BlobStorageConfiguration,
-) bool {
-	var changed bool
-
-	if duration != nil && blobCfg.RetentionPeriod != *duration {
-		blobCfg.RetentionPeriod = *duration
-		changed = true
-	}
-
-	return changed
-}
-
-func setBlobConfigMode(
-	mode *repository.RetentionMode,
-	blobCfg *format.BlobStorageConfiguration,
-) (bool, error) {
-	if mode == nil {
-		return false, nil
-	}
-
-	startMode := blobCfg.RetentionMode
-
-	switch *mode {
-	case repository.NoRetention:
-		if !blobCfg.IsRetentionEnabled() {
-			return false, nil
-		}
-
-		blobCfg.RetentionMode = ""
-		blobCfg.RetentionPeriod = 0
-
-	case repository.GovernanceRetention:
-		blobCfg.RetentionMode = blob.Governance
-
-	case repository.ComplianceRetention:
-		blobCfg.RetentionMode = blob.Compliance
-
-	default:
-		return false, clues.New("unknown retention mode").
-			With("provided_retention_mode", mode.String())
-	}
-
-	// Only check if the retention mode is not empty. IsValid errors out if it's
-	// empty.
-	if len(blobCfg.RetentionMode) > 0 && !blobCfg.RetentionMode.IsValid() {
-		return false, clues.New("invalid retention mode").
-			With("retention_mode", blobCfg.RetentionMode)
-	}
-
-	return startMode != blobCfg.RetentionMode, nil
-}
+	return clues.Stack(w.c.setRetentionParameters(ctx, retention)).OrNil()
 }
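The retention body removed above now lives behind the connection, leaving the wrapper as a one-line delegation. A hedged Go usage sketch, reflecting the pointer-field semantics the removed validation enforced (all fields are optional; duration must be zero when retention is disabled):

// Sketch only: enable compliance-mode retention for one week and
// extend object locks during maintenance. A nil field leaves the
// corresponding setting unchanged.
mode := repository.ComplianceRetention
dur := 7 * 24 * time.Hour
extend := true

err := w.SetRetentionParameters(ctx, repository.Retention{
	Mode:     &mode,
	Duration: &dur,
	Extend:   &extend,
})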
@ -696,6 +696,24 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
 			42),
 	}
 
+	c1 := exchMock.NewCollection(
+		suite.storePath1,
+		suite.locPath1,
+		0)
+	c1.ColState = data.NotMovedState
+	c1.PrevPath = suite.storePath1
+
+	c2 := exchMock.NewCollection(
+		suite.storePath2,
+		suite.locPath2,
+		0)
+	c2.ColState = data.NotMovedState
+	c2.PrevPath = suite.storePath2
+
+	// Make empty collections at the same locations to force a backup with no
+	// changes. Needed to ensure we force a backup even if nothing has changed.
+	emptyCollections := []data.BackupCollection{c1, c2}
+
 	// tags that are supplied by the caller. This includes basic tags to support
 	// lookups and extra tags the caller may want to apply.
 	tags := map[string]string{
@ -703,108 +721,246 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
 		"brunhilda": "",
 	}
 
-	reasons := []Reason{
-		{
-			ResourceOwner: suite.storePath1.ResourceOwner(),
-			Service:       suite.storePath1.Service(),
-			Category:      suite.storePath1.Category(),
-		},
-		{
-			ResourceOwner: suite.storePath2.ResourceOwner(),
-			Service:       suite.storePath2.Service(),
-			Category:      suite.storePath2.Category(),
-		},
-	}
-
-	for _, r := range reasons {
-		for _, k := range r.TagKeys() {
-			tags[k] = ""
-		}
+	reasons := []Reasoner{
+		NewReason(
+			testTenant,
+			suite.storePath1.ResourceOwner(),
+			suite.storePath1.Service(),
+			suite.storePath1.Category(),
+		),
+		NewReason(
+			testTenant,
+			suite.storePath2.ResourceOwner(),
+			suite.storePath2.Service(),
+			suite.storePath2.Category(),
+		),
 	}
 
 	expectedTags := map[string]string{}
 
-	maps.Copy(expectedTags, normalizeTagKVs(tags))
+	maps.Copy(expectedTags, tags)
+
+	for _, r := range reasons {
+		for _, k := range tagKeys(r) {
+			expectedTags[k] = ""
+		}
+	}
+
+	expectedTags = normalizeTagKVs(expectedTags)
 
-	table := []struct {
-		name                  string
-		expectedUploadedFiles int
-		expectedCachedFiles   int
-		// Whether entries in the resulting details should be marked as updated.
-		deetsUpdated bool
-	}{
-		{
-			name:                  "Uncached",
-			expectedUploadedFiles: 47,
-			expectedCachedFiles:   0,
-			deetsUpdated:          true,
-		},
-		{
-			name:                  "Cached",
-			expectedUploadedFiles: 0,
-			expectedCachedFiles:   47,
-			deetsUpdated:          false,
-		},
-	}
-
-	prevSnaps := []IncrementalBase{}
+	type testCase struct {
+		name                  string
+		baseBackups           func(base ManifestEntry) BackupBases
+		collections           []data.BackupCollection
+		expectedUploadedFiles int
+		expectedCachedFiles   int
+		// We're either going to get details entries or entries in the details
+		// merger. Details is populated when there's entries in the collection. The
+		// details merger is populated for cached entries. The details merger
+		// doesn't count folders, only items.
+		//
+		// Setting this to true looks for details merger entries. Setting it to
+		// false looks for details entries.
+		expectMerge bool
+		// Whether entries in the resulting details should be marked as updated.
+		deetsUpdated     assert.BoolAssertionFunc
+		hashedBytesCheck assert.ValueAssertionFunc
+		// Range of bytes (inclusive) to expect as uploaded. A little fragile, but
+		// allows us to differentiate between content that wasn't uploaded due to
+		// being cached/deduped/skipped due to existing dir entries and stuff that
+		// was actually pushed to S3.
+		uploadedBytes []int64
+	}
+
+	// Initial backup. All files should be considered new by kopia.
+	baseBackupCase := testCase{
+		name: "Uncached",
+		baseBackups: func(ManifestEntry) BackupBases {
+			return NewMockBackupBases()
+		},
+		collections:           collections,
+		expectedUploadedFiles: 47,
+		expectedCachedFiles:   0,
+		deetsUpdated:          assert.True,
+		hashedBytesCheck:      assert.NotZero,
+		uploadedBytes:         []int64{8000, 10000},
+	}
+
+	runAndTestBackup := func(test testCase, base ManifestEntry) ManifestEntry {
+		var res ManifestEntry
 
-	for _, test := range table {
 		suite.Run(test.name, func() {
 			t := suite.T()
 
-			stats, deets, _, err := suite.w.ConsumeBackupCollections(
-				suite.ctx,
-				prevSnaps,
-				collections,
+			ctx, flush := tester.NewContext(t)
+			defer flush()
+
+			bbs := test.baseBackups(base)
+
+			stats, deets, deetsMerger, err := suite.w.ConsumeBackupCollections(
+				ctx,
+				reasons,
+				bbs,
+				test.collections,
 				nil,
 				tags,
 				true,
 				fault.New(true))
-			assert.NoError(t, err, clues.ToCore(err))
+			require.NoError(t, err, clues.ToCore(err))
 
 			assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files")
 			assert.Equal(t, test.expectedUploadedFiles, stats.UncachedFileCount, "uncached files")
 			assert.Equal(t, test.expectedCachedFiles, stats.CachedFileCount, "cached files")
-			assert.Equal(t, 6, stats.TotalDirectoryCount)
+			assert.Equal(t, 4+len(test.collections), stats.TotalDirectoryCount, "directory count")
 			assert.Equal(t, 0, stats.IgnoredErrorCount)
 			assert.Equal(t, 0, stats.ErrorCount)
 			assert.False(t, stats.Incomplete)
+			test.hashedBytesCheck(t, stats.TotalHashedBytes, "hashed bytes")
+			assert.LessOrEqual(
+				t,
+				test.uploadedBytes[0],
+				stats.TotalUploadedBytes,
+				"low end of uploaded bytes")
+			assert.GreaterOrEqual(
+				t,
+				test.uploadedBytes[1],
+				stats.TotalUploadedBytes,
+				"high end of uploaded bytes")
 
-			// 47 file and 2 folder entries.
-			details := deets.Details().Entries
-			assert.Len(
-				t,
-				details,
-				test.expectedUploadedFiles+test.expectedCachedFiles+2,
-			)
-
-			for _, entry := range details {
-				assert.Equal(t, test.deetsUpdated, entry.Updated)
+			if test.expectMerge {
+				assert.Empty(t, deets.Details().Entries, "details entries")
+				assert.Equal(
+					t,
+					test.expectedUploadedFiles+test.expectedCachedFiles,
+					deetsMerger.ItemsToMerge(),
+					"details merger entries")
+			} else {
+				assert.Zero(t, deetsMerger.ItemsToMerge(), "details merger entries")
+
+				details := deets.Details().Entries
+				assert.Len(
+					t,
+					details,
+					// 47 file and 2 folder entries.
+					test.expectedUploadedFiles+test.expectedCachedFiles+2,
+				)
+
+				for _, entry := range details {
+					test.deetsUpdated(t, entry.Updated)
+				}
 			}
 
 			checkSnapshotTags(
 				t,
-				suite.ctx,
+				ctx,
 				suite.w.c,
 				expectedTags,
 				stats.SnapshotID,
 			)
 
 			snap, err := snapshot.LoadSnapshot(
-				suite.ctx,
+				ctx,
 				suite.w.c,
 				manifest.ID(stats.SnapshotID),
 			)
 			require.NoError(t, err, clues.ToCore(err))
 
-			prevSnaps = append(prevSnaps, IncrementalBase{
-				Manifest: snap,
-				SubtreePaths: []*path.Builder{
-					suite.storePath1.ToBuilder().Dir(),
-				},
-			})
+			res = ManifestEntry{
+				Manifest: snap,
+				Reasons:  reasons,
+			}
 		})
-	}
+
+		return res
+	}
+
+	base := runAndTestBackup(baseBackupCase, ManifestEntry{})
+
+	table := []testCase{
+		{
+			name: "Kopia Assist And Merge All Files Changed",
+			baseBackups: func(base ManifestEntry) BackupBases {
+				return NewMockBackupBases().WithMergeBases(base)
+			},
+			collections:           collections,
+			expectedUploadedFiles: 0,
+			expectedCachedFiles:   47,
+			deetsUpdated:          assert.False,
+			hashedBytesCheck:      assert.Zero,
+			uploadedBytes:         []int64{4000, 6000},
+		},
+		{
+			name: "Kopia Assist And Merge No Files Changed",
+			baseBackups: func(base ManifestEntry) BackupBases {
+				return NewMockBackupBases().WithMergeBases(base)
+			},
+			// Pass in empty collections to force a backup. Otherwise we'll skip
+			// actually trying to do anything because we'll see there's nothing that
+			// changed. The real goal is to get it to deal with the merged collections
+			// again though.
+			collections: emptyCollections,
+			// Should hit cached check prior to dir entry check so we see them as
+			// cached.
+			expectedUploadedFiles: 0,
+			expectedCachedFiles:   47,
+			// Entries go into the details merger because we never materialize details
+			// info for the items since they're from the base.
+			expectMerge: true,
+			// Not used since there's no details entries.
+			deetsUpdated:     assert.False,
+			hashedBytesCheck: assert.Zero,
+			uploadedBytes:    []int64{4000, 6000},
+		},
+		{
+			name: "Kopia Assist Only",
+			baseBackups: func(base ManifestEntry) BackupBases {
+				return NewMockBackupBases().WithAssistBases(base)
+			},
+			collections:           collections,
+			expectedUploadedFiles: 0,
+			expectedCachedFiles:   47,
+			deetsUpdated:          assert.False,
+			hashedBytesCheck:      assert.Zero,
+			uploadedBytes:         []int64{4000, 6000},
+		},
+		{
+			name: "Merge Only",
+			baseBackups: func(base ManifestEntry) BackupBases {
+				return NewMockBackupBases().WithMergeBases(base).ClearMockAssistBases()
+			},
+			// Pass in empty collections to force a backup. Otherwise we'll skip
+			// actually trying to do anything because we'll see there's nothing that
+			// changed. The real goal is to get it to deal with the merged collections
+			// again though.
+			collections:           emptyCollections,
+			expectedUploadedFiles: 47,
+			expectedCachedFiles:   0,
+			expectMerge:           true,
+			// Not used since there's no details entries.
+			deetsUpdated: assert.False,
+			// Kopia still counts these bytes as "hashed" even though it shouldn't
+			// read the file data since they already have dir entries it can reuse.
+			hashedBytesCheck: assert.NotZero,
+			uploadedBytes:    []int64{4000, 6000},
+		},
+		{
+			name: "Content Hash Only",
+			baseBackups: func(base ManifestEntry) BackupBases {
+				return NewMockBackupBases()
+			},
+			collections:           collections,
+			expectedUploadedFiles: 47,
+			expectedCachedFiles:   0,
+			// Marked as updated because we still fall into the uploadFile handler in
+			// kopia instead of the cachedFile handler.
+			deetsUpdated:     assert.True,
+			hashedBytesCheck: assert.NotZero,
+			uploadedBytes:    []int64{4000, 6000},
+		},
+	}
+
+	for _, test := range table {
+		runAndTestBackup(test, base)
+	}
 }
@ -837,23 +993,25 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 		"brunhilda": "",
 	}
 
-	reasons := []Reason{
-		{
-			ResourceOwner: storePath.ResourceOwner(),
-			Service:       storePath.Service(),
-			Category:      storePath.Category(),
-		},
-	}
-
-	for _, r := range reasons {
-		for _, k := range r.TagKeys() {
-			tags[k] = ""
-		}
+	reasons := []Reasoner{
+		NewReason(
+			testTenant,
+			storePath.ResourceOwner(),
+			storePath.Service(),
+			storePath.Category()),
 	}
 
 	expectedTags := map[string]string{}
 
-	maps.Copy(expectedTags, normalizeTagKVs(tags))
+	maps.Copy(expectedTags, tags)
+
+	for _, r := range reasons {
+		for _, k := range tagKeys(r) {
+			expectedTags[k] = ""
+		}
+	}
+
+	expectedTags = normalizeTagKVs(expectedTags)
 
 	table := []struct {
 		name string
@ -931,7 +1089,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 		},
 	}
 
-	prevSnaps := []IncrementalBase{}
+	prevSnaps := NewMockBackupBases()
 
 	for _, test := range table {
 		suite.Run(test.name, func() {
@ -940,6 +1098,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 
 			stats, deets, prevShortRefs, err := suite.w.ConsumeBackupCollections(
 				suite.ctx,
+				reasons,
 				prevSnaps,
 				collections,
 				nil,
@ -992,12 +1151,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 				manifest.ID(stats.SnapshotID))
 			require.NoError(t, err, clues.ToCore(err))
 
-			prevSnaps = append(prevSnaps, IncrementalBase{
-				Manifest: snap,
-				SubtreePaths: []*path.Builder{
-					storePath.ToBuilder().Dir(),
-				},
-			})
+			prevSnaps.WithMergeBases(
+				ManifestEntry{
+					Manifest: snap,
+					Reasons:  reasons,
+				},
+			)
 		})
 	}
 }
@ -1016,16 +1175,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 
 	w := &Wrapper{k}
 
-	tags := map[string]string{}
-	reason := Reason{
-		ResourceOwner: testUser,
-		Service:       path.ExchangeService,
-		Category:      path.EmailCategory,
-	}
-
-	for _, k := range reason.TagKeys() {
-		tags[k] = ""
-	}
+	r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
 
 	dc1 := exchMock.NewCollection(suite.storePath1, suite.locPath1, 1)
 	dc2 := exchMock.NewCollection(suite.storePath2, suite.locPath2, 1)
@ -1038,10 +1188,11 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 
 	stats, _, _, err := w.ConsumeBackupCollections(
 		ctx,
+		[]Reasoner{r},
 		nil,
 		[]data.BackupCollection{dc1, dc2},
 		nil,
-		tags,
+		nil,
 		true,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@ -1112,16 +1263,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 
 	loc1 := path.Builder{}.Append(suite.storePath1.Folders()...)
 	loc2 := path.Builder{}.Append(suite.storePath2.Folders()...)
-	tags := map[string]string{}
-	reason := Reason{
-		ResourceOwner: testUser,
-		Service:       path.ExchangeService,
-		Category:      path.EmailCategory,
-	}
-
-	for _, k := range reason.TagKeys() {
-		tags[k] = ""
-	}
+	r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
 
 	collections := []data.BackupCollection{
 		&mockBackupCollection{
@ -1164,10 +1306,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 
 	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
+		[]Reasoner{r},
 		nil,
 		collections,
 		nil,
-		tags,
+		nil,
 		true,
 		fault.New(true))
 	require.Error(t, err, clues.ToCore(err))
@ -1239,6 +1382,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
 			s, d, _, err := suite.w.ConsumeBackupCollections(
 				ctx,
 				nil,
+				nil,
 				test.collections,
 				nil,
 				nil,
@ -1391,23 +1535,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 		collections = append(collections, collection)
 	}
 
-	tags := map[string]string{}
-	reason := Reason{
-		ResourceOwner: testUser,
-		Service:       path.ExchangeService,
-		Category:      path.EmailCategory,
-	}
-
-	for _, k := range reason.TagKeys() {
-		tags[k] = ""
-	}
+	r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
 
 	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
+		[]Reasoner{r},
 		nil,
 		collections,
 		nil,
-		tags,
+		nil,
 		false,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@ -1437,32 +1573,11 @@ func (c *i64counter) Count(i int64) {
 }
 
 func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
-	reason := Reason{
-		ResourceOwner: testUser,
-		Service:       path.ExchangeService,
-		Category:      path.EmailCategory,
-	}
-
-	subtreePathTmp, err := path.Build(
-		testTenant,
-		testUser,
-		path.ExchangeService,
-		path.EmailCategory,
-		false,
-		"tmp")
-	require.NoError(suite.T(), err, clues.ToCore(err))
-
-	subtreePath := subtreePathTmp.ToBuilder().Dir()
+	r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
 
 	man, err := suite.w.c.LoadSnapshot(suite.ctx, suite.snapshotID)
 	require.NoError(suite.T(), err, "getting base snapshot: %v", clues.ToCore(err))
 
-	tags := map[string]string{}
-
-	for _, k := range reason.TagKeys() {
-		tags[k] = ""
-	}
-
 	table := []struct {
 		name        string
 		excludeItem bool
@ -1551,17 +1666,16 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 
 			stats, _, _, err := suite.w.ConsumeBackupCollections(
 				suite.ctx,
-				[]IncrementalBase{
-					{
-						Manifest: man,
-						SubtreePaths: []*path.Builder{
-							subtreePath,
-						},
-					},
-				},
+				[]Reasoner{r},
+				NewMockBackupBases().WithMergeBases(
+					ManifestEntry{
+						Manifest: man,
+						Reasons:  []Reasoner{r},
+					},
+				),
 				test.cols(),
 				excluded,
-				tags,
+				nil,
 				true,
 				fault.New(true))
 			require.NoError(t, err, clues.ToCore(err))
@ -57,7 +57,7 @@ func (suite *DataCollectionIntgSuite) SetupSuite() {
 
 	suite.tenantID = creds.AzureTenantID
 
-	suite.ac, err = api.NewClient(creds)
+	suite.ac, err = api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 }
 
@ -69,7 +69,7 @@ func NewController(
 		return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx)
 	}
 
-	ac, err := api.NewClient(creds)
+	ac, err := api.NewClient(creds, co)
 	if err != nil {
 		return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
 	}
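From here on, the recurring mechanical change is the extra options argument to api.NewClient: production code threads the caller's own control.Options (co above), while tests substitute control.Defaults(). A short Go sketch of the test-side pattern, using names from the hunks that follow:

creds, err := acct.M365Config()
require.NoError(t, err, clues.ToCore(err))

// control.Defaults() stands in for caller-specific options in tests.
ac, err := api.NewClient(creds, control.Defaults())
require.NoError(t, err, clues.ToCore(err))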
@ -692,6 +692,7 @@ func runRestoreBackupTestVersions(
 	tenant string,
 	resourceOwners []string,
 	opts control.Options,
+	crc control.RestoreConfig,
 ) {
 	ctx, flush := tester.NewContext(t)
 	defer flush()
@ -702,7 +703,7 @@ func runRestoreBackupTestVersions(
 		Service:        test.service,
 		Tenant:         tenant,
 		ResourceOwners: resourceOwners,
-		RestoreCfg:     testdata.DefaultRestoreConfig(""),
+		RestoreCfg:     crc,
 	}
 
 	totalItems, _, collections, _, err := stub.GetCollectionsAndExpected(
@ -414,7 +414,7 @@ func (suite *BackupIntgSuite) SetupSuite() {
 	creds, err := acct.M365Config()
 	require.NoError(t, err, clues.ToCore(err))
 
-	suite.ac, err = api.NewClient(creds)
+	suite.ac, err = api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	suite.tenantID = creds.AzureTenantID
@ -17,6 +17,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
@ -698,7 +699,7 @@ func (suite *ContainerResolverSuite) SetupSuite() {
 }
 
 func (suite *ContainerResolverSuite) TestPopulate() {
-	ac, err := api.NewClient(suite.credentials)
+	ac, err := api.NewClient(suite.credentials, control.Defaults())
 	require.NoError(suite.T(), err, clues.ToCore(err))
 
 	eventFunc := func(t *testing.T) graph.ContainerResolver {
@ -9,6 +9,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 
@ -30,7 +31,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
 
 	its.creds = creds
 
-	its.ac, err = api.NewClient(creds)
+	its.ac, err = api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	its.userID = tconfig.GetM365UserID(ctx)
@ -12,6 +12,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )
@ -83,7 +84,7 @@ func (suite *MailFolderCacheIntegrationSuite) TestDeltaFetch() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()
 
-	ac, err := api.NewClient(suite.credentials)
+	ac, err := api.NewClient(suite.credentials, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	acm := ac.Mail()
@ -44,7 +44,7 @@ func (suite *RestoreIntgSuite) SetupSuite() {
 	require.NoError(t, err, clues.ToCore(err))
 
 	suite.credentials = m365
-	suite.ac, err = api.NewClient(m365)
+	suite.ac, err = api.NewClient(m365, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 }
 
@ -93,7 +93,7 @@ func (suite *HTTPWrapperUnitSuite) TestNewHTTPWrapper_redirectMiddleware() {
 	hdr.Set("Location", "localhost:99999999/smarfs")
 
 	toResp := &http.Response{
-		StatusCode: 302,
+		StatusCode: http.StatusFound,
 		Header:     hdr,
 	}
 
@ -313,7 +313,7 @@ func (suite *OneDriveIntgSuite) SetupSuite() {
 
 	suite.creds = creds
 
-	suite.ac, err = api.NewClient(creds)
+	suite.ac, err = api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 }
 
@ -9,6 +9,7 @@ import (
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 
@ -20,7 +21,7 @@ type oneDriveService struct {
 }
 
 func NewOneDriveService(credentials account.M365Config) (*oneDriveService, error) {
-	ac, err := api.NewClient(credentials)
+	ac, err := api.NewClient(credentials, control.Defaults())
 	if err != nil {
 		return nil, err
 	}
@ -53,7 +53,7 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
 	creds, err := acct.M365Config()
 	require.NoError(t, err, clues.ToCore(err))
 
-	suite.ac, err = api.NewClient(creds)
+	suite.ac, err = api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	drive, err := suite.ac.Users().GetDefaultDrive(ctx, suite.user)
@ -22,6 +22,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/internal/version"
 	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/control/testdata"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )
@ -516,6 +517,9 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(
 		collectionsLatest: expected,
 	}
 
+	rc := testdata.DefaultRestoreConfig("od_restore_and_backup_multi")
+	rc.OnCollision = control.Replace
+
 	runRestoreBackupTestVersions(
 		t,
 		testData,
@ -524,7 +528,8 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(
 			control.Options{
 				RestorePermissions: true,
 				ToggleFeatures:     control.Toggles{},
-			})
+			},
+			rc)
 		})
 	}
 }
@ -763,6 +768,9 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
 		collectionsLatest: expected,
 	}
 
+	rc := testdata.DefaultRestoreConfig("perms_restore_and_backup")
+	rc.OnCollision = control.Replace
+
 	runRestoreBackupTestVersions(
 		t,
 		testData,
@ -771,7 +779,8 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
 			control.Options{
 				RestorePermissions: true,
 				ToggleFeatures:     control.Toggles{},
-			})
+			},
+			rc)
 		})
 	}
 }
@ -851,6 +860,9 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
 		collectionsLatest: expected,
 	}
 
+	rc := testdata.DefaultRestoreConfig("perms_backup_no_restore")
+	rc.OnCollision = control.Replace
+
 	runRestoreBackupTestVersions(
 		t,
 		testData,
@ -859,7 +871,8 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
 			control.Options{
 				RestorePermissions: false,
 				ToggleFeatures:     control.Toggles{},
-			})
+			},
+			rc)
 		})
 	}
 }
@ -1054,6 +1067,9 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersion int) {
 		collectionsLatest: expected,
 	}
 
+	rc := testdata.DefaultRestoreConfig("perms_inherit_restore_and_backup")
+	rc.OnCollision = control.Replace
+
 	runRestoreBackupTestVersions(
 		t,
 		testData,
@ -1062,7 +1078,8 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersion int) {
 			control.Options{
 				RestorePermissions: true,
 				ToggleFeatures:     control.Toggles{},
-			})
+			},
+			rc)
 		})
 	}
 }
@ -1247,6 +1264,9 @@ func testLinkSharesInheritanceRestoreAndBackup(suite oneDriveSuite, startVersion int) {
 		collectionsLatest: expected,
 	}
 
+	rc := testdata.DefaultRestoreConfig("linkshares_inherit_restore_and_backup")
+	rc.OnCollision = control.Replace
+
 	runRestoreBackupTestVersions(
 		t,
 		testData,
@ -1255,7 +1275,8 @@ func testLinkSharesInheritanceRestoreAndBackup(suite oneDriveSuite, startVersion int) {
 			control.Options{
 				RestorePermissions: true,
 				ToggleFeatures:     control.Toggles{},
-			})
+			},
+			rc)
 		})
 	}
 }
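Each of the five OneDrive test bodies above follows the same recipe: build a restore config with a distinct folder label, pin the collision policy explicitly, and thread it through the new crc parameter of runRestoreBackupTestVersions. A hedged Go sketch (the label string is arbitrary; elided arguments are unchanged from the existing call sites):

rc := testdata.DefaultRestoreConfig("any_unique_label")
rc.OnCollision = control.Replace // overwrite colliding items on restore

runRestoreBackupTestVersions(
	t,
	testData,
	// ... unchanged arguments ...
	control.Options{
		RestorePermissions: true,
		ToggleFeatures:     control.Toggles{},
	},
	rc)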
@ -36,7 +36,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
 	defer end()
 
 	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
-	ctx = clues.Add(ctx, "restore_config", restoreCfg) // TODO(rkeepers): needs PII control
+	ctx = clues.Add(ctx, "restore_config", restoreCfg)
 
 	if len(dcs) == 0 {
 		return nil, clues.New("no data collections to restore")
@ -201,7 +201,7 @@ func (suite *SharePointPagesSuite) TestCollectPages() {
 	creds, err := a.M365Config()
 	require.NoError(t, err, clues.ToCore(err))
 
-	ac, err := api.NewClient(creds)
+	ac, err := api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	col, err := collectPages(
@ -43,7 +43,7 @@ func (suite *SharePointCollectionSuite) SetupSuite() {
 
 	suite.creds = m365
 
-	ac, err := api.NewClient(m365)
+	ac, err := api.NewClient(m365, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	suite.ac = ac
@ -6,7 +6,6 @@ import (
 
 	"github.com/alcionai/clues"
 	"github.com/google/uuid"
-	"github.com/kopia/kopia/repo/manifest"
 
 	"github.com/alcionai/corso/src/internal/common/crash"
 	"github.com/alcionai/corso/src/internal/common/dttm"
@ -280,8 +279,8 @@ func (op *BackupOperation) do(
 	backupID model.StableID,
 ) (*details.Builder, error) {
 	var (
-		reasons           = selectorToReasons(op.Selectors, false)
-		fallbackReasons   = makeFallbackReasons(op.Selectors)
+		reasons           = selectorToReasons(op.account.ID(), op.Selectors, false)
+		fallbackReasons   = makeFallbackReasons(op.account.ID(), op.Selectors)
 		lastBackupVersion = version.NoBackup
 	)
 
@ -370,10 +369,10 @@ func (op *BackupOperation) do(
 	return deets, nil
 }
 
-func makeFallbackReasons(sel selectors.Selector) []kopia.Reason {
+func makeFallbackReasons(tenant string, sel selectors.Selector) []kopia.Reasoner {
 	if sel.PathService() != path.SharePointService &&
 		sel.DiscreteOwner != sel.DiscreteOwnerName {
-		return selectorToReasons(sel, true)
+		return selectorToReasons(tenant, sel, true)
 	}
 
 	return nil
@ -420,9 +419,13 @@ func produceBackupDataCollections(
 // Consumer funcs
 // ---------------------------------------------------------------------------
 
-func selectorToReasons(sel selectors.Selector, useOwnerNameForID bool) []kopia.Reason {
+func selectorToReasons(
+	tenant string,
+	sel selectors.Selector,
+	useOwnerNameForID bool,
+) []kopia.Reasoner {
 	service := sel.PathService()
-	reasons := []kopia.Reason{}
+	reasons := []kopia.Reasoner{}
 
 	pcs, err := sel.PathCategories()
 	if err != nil {
@ -438,43 +441,19 @@ func selectorToReasons(sel selectors.Selector, useOwnerNameForID bool) []kopia.R
 
 	for _, sl := range [][]path.CategoryType{pcs.Includes, pcs.Filters} {
 		for _, cat := range sl {
-			reasons = append(reasons, kopia.Reason{
-				ResourceOwner: owner,
-				Service:       service,
-				Category:      cat,
-			})
+			reasons = append(reasons, kopia.NewReason(tenant, owner, service, cat))
 		}
 	}
 
 	return reasons
 }
 
-func builderFromReason(ctx context.Context, tenant string, r kopia.Reason) (*path.Builder, error) {
-	ctx = clues.Add(ctx, "category", r.Category.String())
-
-	// This is hacky, but we want the path package to format the path the right
-	// way (e.x. proper order for service, category, etc), but we don't care about
-	// the folders after the prefix.
-	p, err := path.Build(
-		tenant,
-		r.ResourceOwner,
-		r.Service,
-		r.Category,
-		false,
-		"tmp")
-	if err != nil {
-		return nil, clues.Wrap(err, "building path").WithClues(ctx)
-	}
-
-	return p.ToBuilder().Dir(), nil
-}
-
 // calls kopia to backup the collections of data
 func consumeBackupCollections(
 	ctx context.Context,
 	bc kinject.BackupConsumer,
 	tenantID string,
-	reasons []kopia.Reason,
+	reasons []kopia.Reasoner,
 	bbs kopia.BackupBases,
 	cs []data.BackupCollection,
 	pmr prefixmatcher.StringSetReader,
@ -495,90 +474,10 @@ func consumeBackupCollections(
 		kopia.TagBackupCategory: "",
 	}
 
-	for _, reason := range reasons {
-		for _, k := range reason.TagKeys() {
-			tags[k] = ""
-		}
-	}
-
-	// AssistBases should be the upper bound for how many snapshots we pass in.
-	bases := make([]kopia.IncrementalBase, 0, len(bbs.AssistBases()))
-	// Track IDs we've seen already so we don't accidentally duplicate some
-	// manifests. This can be removed when we move the code below into the kopia
-	// package.
-	ids := map[manifest.ID]struct{}{}
-
-	var mb []kopia.ManifestEntry
-
-	if bbs != nil {
-		mb = bbs.MergeBases()
-	}
-
-	// TODO(ashmrtn): Make a wrapper for Reson that allows adding a tenant and
-	// make a function that will spit out a prefix that includes the tenant. With
-	// that done this code can be moved to kopia wrapper since it's really more
-	// specific to that.
-	for _, m := range mb {
-		paths := make([]*path.Builder, 0, len(m.Reasons))
-		services := map[string]struct{}{}
-		categories := map[string]struct{}{}
-
-		for _, reason := range m.Reasons {
-			pb, err := builderFromReason(ctx, tenantID, reason)
-			if err != nil {
-				return nil, nil, nil, clues.Wrap(err, "getting subtree paths for bases")
-			}
-
-			paths = append(paths, pb)
-			services[reason.Service.String()] = struct{}{}
-			categories[reason.Category.String()] = struct{}{}
-		}
-
-		ids[m.ID] = struct{}{}
-
-		bases = append(bases, kopia.IncrementalBase{
-			Manifest:     m.Manifest,
-			SubtreePaths: paths,
-		})
-
-		svcs := make([]string, 0, len(services))
-		for k := range services {
-			svcs = append(svcs, k)
-		}
-
-		cats := make([]string, 0, len(categories))
-		for k := range categories {
-			cats = append(cats, k)
-		}
-
-		mbID, ok := m.GetTag(kopia.TagBackupID)
-		if !ok {
-			mbID = "no_backup_id_tag"
-		}
-
-		logger.Ctx(ctx).Infow(
-			"using base for backup",
-			"base_snapshot_id", m.ID,
-			"services", svcs,
-			"categories", cats,
-			"base_backup_id", mbID)
-	}
-
-	// At the moment kopia assisted snapshots are in the same set as merge bases.
-	// When we fixup generating subtree paths we can remove this.
-	if bbs != nil {
-		for _, ab := range bbs.AssistBases() {
-			if _, ok := ids[ab.ID]; ok {
-				continue
-			}
-
-			bases = append(bases, kopia.IncrementalBase{Manifest: ab.Manifest})
-		}
-	}
-
 	kopiaStats, deets, itemsSourcedFromBase, err := bc.ConsumeBackupCollections(
 		ctx,
-		bases,
+		reasons,
+		bbs,
 		cs,
 		pmr,
 		tags,
@ -586,7 +485,7 @@ func consumeBackupCollections(
 		errs)
 	if err != nil {
 		if kopiaStats == nil {
-			return nil, nil, nil, err
+			return nil, nil, nil, clues.Stack(err)
 		}
 
 		return nil, nil, nil, clues.Stack(err).With(
@ -609,11 +508,11 @@ func consumeBackupCollections(
 	return kopiaStats, deets, itemsSourcedFromBase, err
 }
 
-func matchesReason(reasons []kopia.Reason, p path.Path) bool {
+func matchesReason(reasons []kopia.Reasoner, p path.Path) bool {
 	for _, reason := range reasons {
-		if p.ResourceOwner() == reason.ResourceOwner &&
-			p.Service() == reason.Service &&
-			p.Category() == reason.Category {
+		if p.ResourceOwner() == reason.ProtectedResource() &&
+			p.Service() == reason.Service() &&
+			p.Category() == reason.Category() {
 			return true
 		}
 	}
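matchesReason above pins down the accessor surface that replaced the old Reason struct fields. A hedged Go sketch of the interface this diff implies; the kopia package's actual definition may differ or include more, and Tenant() in particular is an assumption based on NewReason's first argument rather than anything exercised here:

type Reasoner interface {
	Tenant() string              // assumed: NewReason(tenant, ...) retains it
	ProtectedResource() string   // formerly the ResourceOwner field
	Service() path.ServiceType   // formerly the Service field
	Category() path.CategoryType // formerly the Category field
}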
@ -107,7 +107,8 @@ func checkPaths(t *testing.T, expected, got []path.Path) {
 
 type mockBackupConsumer struct {
 	checkFunc func(
-		bases []kopia.IncrementalBase,
+		backupReasons []kopia.Reasoner,
+		bases kopia.BackupBases,
 		cs []data.BackupCollection,
 		tags map[string]string,
 		buildTreeWithBase bool)
@ -115,7 +116,8 @@ type mockBackupConsumer struct {
 
 func (mbu mockBackupConsumer) ConsumeBackupCollections(
 	ctx context.Context,
-	bases []kopia.IncrementalBase,
+	backupReasons []kopia.Reasoner,
+	bases kopia.BackupBases,
 	cs []data.BackupCollection,
 	excluded prefixmatcher.StringSetReader,
 	tags map[string]string,
@ -123,7 +125,7 @@ func (mbu mockBackupConsumer) ConsumeBackupCollections(
 	errs *fault.Bus,
 ) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error) {
 	if mbu.checkFunc != nil {
-		mbu.checkFunc(bases, cs, tags, buildTreeWithBase)
+		mbu.checkFunc(backupReasons, bases, cs, tags, buildTreeWithBase)
 	}
 
 	return &kopia.BackupStats{}, &details.Builder{}, nil, nil
@ -388,31 +390,25 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_PersistResults() {
|
|||||||
|
|
||||||
func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections_Paths() {
|
func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections_Paths() {
|
||||||
var (
|
var (
|
||||||
|
t = suite.T()
|
||||||
|
|
||||||
tenant = "a-tenant"
|
tenant = "a-tenant"
|
||||||
resourceOwner = "a-user"
|
resourceOwner = "a-user"
|
||||||
|
|
||||||
emailBuilder = path.Builder{}.Append(
|
emailReason = kopia.NewReason(
|
||||||
tenant,
|
tenant,
|
||||||
path.ExchangeService.String(),
|
|
||||||
resourceOwner,
|
resourceOwner,
|
||||||
path.EmailCategory.String(),
|
path.ExchangeService,
|
||||||
)
|
path.EmailCategory)
|
||||||
contactsBuilder = path.Builder{}.Append(
|
contactsReason = kopia.NewReason(
|
||||||
tenant,
|
tenant,
|
||||||
path.ExchangeService.String(),
|
|
||||||
resourceOwner,
|
resourceOwner,
|
||||||
path.ContactsCategory.String(),
|
path.ExchangeService,
|
||||||
)
|
path.ContactsCategory)
|
||||||
|
|
||||||
emailReason = kopia.Reason{
|
reasons = []kopia.Reasoner{
|
||||||
ResourceOwner: resourceOwner,
|
emailReason,
|
||||||
Service: path.ExchangeService,
|
contactsReason,
|
||||||
Category: path.EmailCategory,
|
|
||||||
}
|
|
||||||
contactsReason = kopia.Reason{
|
|
||||||
ResourceOwner: resourceOwner,
|
|
||||||
Service: path.ExchangeService,
|
|
||||||
Category: path.ContactsCategory,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
manifest1 = &snapshot.Manifest{
|
manifest1 = &snapshot.Manifest{
|
||||||
@ -421,147 +417,57 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
|
|||||||
manifest2 = &snapshot.Manifest{
|
manifest2 = &snapshot.Manifest{
|
||||||
ID: "id2",
|
ID: "id2",
|
||||||
}
|
}
|
||||||
)
|
|
||||||
|
|
||||||
table := []struct {
|
bases = kopia.NewMockBackupBases().WithMergeBases(
|
||||||
name string
|
|
||||||
// Backup model is untouched in this test so there's no need to populate it.
|
|
||||||
input kopia.BackupBases
|
|
||||||
expected []kopia.IncrementalBase
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "SingleManifestSingleReason",
|
|
||||||
input: kopia.NewMockBackupBases().WithMergeBases(
|
|
||||||
kopia.ManifestEntry{
|
kopia.ManifestEntry{
|
||||||
Manifest: manifest1,
|
Manifest: manifest1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
emailReason,
|
|
||||||
},
|
|
||||||
}).ClearMockAssistBases(),
|
|
||||||
expected: []kopia.IncrementalBase{
|
|
||||||
{
|
|
||||||
Manifest: manifest1,
|
|
||||||
SubtreePaths: []*path.Builder{
|
|
||||||
emailBuilder,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "SingleManifestMultipleReasons",
|
|
||||||
input: kopia.NewMockBackupBases().WithMergeBases(
|
|
||||||
kopia.ManifestEntry{
|
|
||||||
Manifest: manifest1,
|
|
||||||
Reasons: []kopia.Reason{
|
|
||||||
emailReason,
|
|
||||||
contactsReason,
|
|
||||||
},
|
|
||||||
}).ClearMockAssistBases(),
|
|
||||||
expected: []kopia.IncrementalBase{
|
|
||||||
{
|
|
||||||
Manifest: manifest1,
|
|
||||||
SubtreePaths: []*path.Builder{
|
|
||||||
emailBuilder,
|
|
||||||
contactsBuilder,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "MultipleManifestsMultipleReasons",
|
|
||||||
input: kopia.NewMockBackupBases().WithMergeBases(
|
|
||||||
kopia.ManifestEntry{
|
|
||||||
Manifest: manifest1,
|
|
||||||
Reasons: []kopia.Reason{
|
|
||||||
emailReason,
|
|
||||||
contactsReason,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
kopia.ManifestEntry{
|
|
||||||
Manifest: manifest2,
|
|
||||||
Reasons: []kopia.Reason{
|
|
||||||
emailReason,
|
|
||||||
contactsReason,
|
|
||||||
},
|
|
||||||
}).ClearMockAssistBases(),
|
|
||||||
expected: []kopia.IncrementalBase{
|
|
||||||
{
|
|
||||||
Manifest: manifest1,
|
|
||||||
SubtreePaths: []*path.Builder{
|
|
||||||
emailBuilder,
|
|
||||||
contactsBuilder,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Manifest: manifest2,
|
|
||||||
SubtreePaths: []*path.Builder{
|
|
||||||
emailBuilder,
|
|
||||||
contactsBuilder,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Single Manifest Single Reason With Assist Base",
|
|
||||||
input: kopia.NewMockBackupBases().WithMergeBases(
|
|
||||||
kopia.ManifestEntry{
|
|
||||||
Manifest: manifest1,
|
|
||||||
Reasons: []kopia.Reason{
|
|
||||||
emailReason,
|
emailReason,
|
||||||
},
|
},
|
||||||
}).WithAssistBases(
|
}).WithAssistBases(
|
||||||
kopia.ManifestEntry{
|
kopia.ManifestEntry{
|
||||||
Manifest: manifest2,
|
Manifest: manifest2,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
contactsReason,
|
contactsReason,
|
||||||
},
|
},
|
||||||
}),
|
})
|
||||||
expected: []kopia.IncrementalBase{
|
|
||||||
{
|
backupID = model.StableID("foo")
|
||||||
Manifest: manifest1,
|
expectedTags = map[string]string{
|
||||||
SubtreePaths: []*path.Builder{
|
kopia.TagBackupID: string(backupID),
|
||||||
emailBuilder,
|
kopia.TagBackupCategory: "",
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Manifest: manifest2,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
)
|
||||||
for _, test := range table {
|
|
||||||
suite.Run(test.name, func() {
|
|
||||||
t := suite.T()
|
|
||||||
|
|
||||||
ctx, flush := tester.NewContext(t)
|
|
||||||
defer flush()
|
|
||||||
|
|
||||||
mbu := &mockBackupConsumer{
|
mbu := &mockBackupConsumer{
|
||||||
checkFunc: func(
|
checkFunc: func(
|
||||||
bases []kopia.IncrementalBase,
|
backupReasons []kopia.Reasoner,
|
||||||
|
gotBases kopia.BackupBases,
|
||||||
cs []data.BackupCollection,
|
cs []data.BackupCollection,
|
||||||
tags map[string]string,
|
gotTags map[string]string,
|
||||||
buildTreeWithBase bool,
|
buildTreeWithBase bool,
|
||||||
) {
|
) {
|
||||||
assert.ElementsMatch(t, test.expected, bases)
|
kopia.AssertBackupBasesEqual(t, bases, gotBases)
|
||||||
|
assert.Equal(t, expectedTags, gotTags)
|
||||||
|
assert.ElementsMatch(t, reasons, backupReasons)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx, flush := tester.NewContext(t)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
//nolint:errcheck
|
//nolint:errcheck
|
||||||
consumeBackupCollections(
|
consumeBackupCollections(
|
||||||
ctx,
|
ctx,
|
||||||
mbu,
|
mbu,
|
||||||
tenant,
|
tenant,
|
||||||
nil,
|
reasons,
|
||||||
test.input,
|
bases,
|
||||||
nil,
|
nil,
|
||||||
nil,
|
nil,
|
||||||
model.StableID(""),
|
backupID,
|
||||||
true,
|
true,
|
||||||
fault.New(true))
|
fault.New(true))
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems() {
|
func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems() {
|
||||||
@ -629,16 +535,16 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
DetailsID: "did2",
|
DetailsID: "did2",
|
||||||
}
|
}
|
||||||
|
|
||||||
pathReason1 = kopia.Reason{
|
pathReason1 = kopia.NewReason(
|
||||||
ResourceOwner: itemPath1.ResourceOwner(),
|
"",
|
||||||
Service: itemPath1.Service(),
|
itemPath1.ResourceOwner(),
|
||||||
Category: itemPath1.Category(),
|
itemPath1.Service(),
|
||||||
}
|
itemPath1.Category())
|
||||||
pathReason3 = kopia.Reason{
|
pathReason3 = kopia.NewReason(
|
||||||
ResourceOwner: itemPath3.ResourceOwner(),
|
"",
|
||||||
Service: itemPath3.Service(),
|
itemPath3.ResourceOwner(),
|
||||||
Category: itemPath3.Category(),
|
itemPath3.Service(),
|
||||||
}
|
itemPath3.Category())
|
||||||
)
|
)
|
||||||
|
|
||||||
itemParents1, err := path.GetDriveFolderPath(itemPath1)
|
itemParents1, err := path.GetDriveFolderPath(itemPath1)
|
||||||
@ -684,7 +590,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
},
|
},
|
||||||
DetailsID: "foo",
|
DetailsID: "foo",
|
||||||
},
|
},
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -703,7 +609,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -730,13 +636,13 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -763,7 +669,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -822,7 +728,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -849,7 +755,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -879,7 +785,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -909,7 +815,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -940,7 +846,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -971,13 +877,13 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
|
|||||||
inputBackups: []kopia.BackupEntry{
|
inputBackups: []kopia.BackupEntry{
|
||||||
{
|
{
|
||||||
Backup: &backup1,
|
Backup: &backup1,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Backup: &backup2,
|
Backup: &backup2,
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason3,
|
pathReason3,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1064,11 +970,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
|
|||||||
|
|
||||||
locPath1 = path.Builder{}.Append(itemPath1.Folders()...)
|
locPath1 = path.Builder{}.Append(itemPath1.Folders()...)
|
||||||
|
|
||||||
pathReason1 = kopia.Reason{
|
pathReason1 = kopia.NewReason(
|
||||||
ResourceOwner: itemPath1.ResourceOwner(),
|
"",
|
||||||
Service: itemPath1.Service(),
|
itemPath1.ResourceOwner(),
|
||||||
Category: itemPath1.Category(),
|
itemPath1.Service(),
|
||||||
}
|
itemPath1.Category())
|
||||||
|
|
||||||
backup1 = kopia.BackupEntry{
|
backup1 = kopia.BackupEntry{
|
||||||
Backup: &backup.Backup{
|
Backup: &backup.Backup{
|
||||||
@ -1077,7 +983,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
|
|||||||
},
|
},
|
||||||
DetailsID: "did1",
|
DetailsID: "did1",
|
||||||
},
|
},
|
||||||
Reasons: []kopia.Reason{
|
Reasons: []kopia.Reasoner{
|
||||||
pathReason1,
|
pathReason1,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -1231,7 +1137,7 @@ func (suite *BackupOpIntegrationSuite) SetupSuite() {
|
|||||||
creds, err := a.M365Config()
|
creds, err := a.M365Config()
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
suite.ac, err = api.NewClient(creds)
|
suite.ac, err = api.NewClient(creds, control.Defaults())
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ func produceManifestsAndMetadata(
 	ctx context.Context,
 	bf inject.BaseFinder,
 	rp inject.RestoreProducer,
-	reasons, fallbackReasons []kopia.Reason,
+	reasons, fallbackReasons []kopia.Reasoner,
 	tenantID string,
 	getMetadata bool,
 ) (kopia.BackupBases, []data.RestoreCollection, bool, error) {
@@ -47,8 +47,8 @@ func produceManifestsAndMetadata(
 	bb = bb.MergeBackupBases(
 		ctx,
 		fbb,
-		func(r kopia.Reason) string {
-			return r.Service.String() + r.Category.String()
+		func(r kopia.Reasoner) string {
+			return r.Service().String() + r.Category().String()
 		})
 
 	if !getMetadata {
@@ -115,9 +115,9 @@ func collectMetadata(
 			Append(fn).
 			ToServiceCategoryMetadataPath(
 				tenantID,
-				reason.ResourceOwner,
-				reason.Service,
-				reason.Category,
+				reason.ProtectedResource(),
+				reason.Service(),
+				reason.Category(),
 				true)
 		if err != nil {
 			return nil, clues.
@@ -47,7 +47,7 @@ type mockBackupFinder struct {
 
 func (bf *mockBackupFinder) FindBases(
 	_ context.Context,
-	reasons []kopia.Reason,
+	reasons []kopia.Reasoner,
 	_ map[string]string,
 ) kopia.BackupBases {
 	if len(reasons) == 0 {
@@ -58,7 +58,7 @@ func (bf *mockBackupFinder) FindBases(
 		return kopia.NewMockBackupBases()
 	}
 
-	b := bf.data[reasons[0].ResourceOwner]
+	b := bf.data[reasons[0].ProtectedResource()]
 	if b == nil {
 		return kopia.NewMockBackupBases()
 	}
@@ -102,7 +102,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 	table := []struct {
 		name        string
 		manID       string
-		reasons     []kopia.Reason
+		reasons     []kopia.Reasoner
 		fileNames   []string
 		expectPaths func(*testing.T, []string) []path.Path
 		expectErr   error
@@ -110,12 +110,8 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 		{
 			name:  "single reason, single file",
 			manID: "single single",
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory),
 			},
 			expectPaths: func(t *testing.T, files []string) []path.Path {
 				ps := make([]path.Path, 0, len(files))
@@ -133,12 +129,8 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 		{
 			name:  "single reason, multiple files",
 			manID: "single multi",
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory),
 			},
 			expectPaths: func(t *testing.T, files []string) []path.Path {
 				ps := make([]path.Path, 0, len(files))
@@ -156,17 +148,9 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 		{
 			name:  "multiple reasons, single file",
 			manID: "multi single",
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory),
+				kopia.NewReason(tid, ro, path.ExchangeService, path.ContactsCategory),
 			},
 			expectPaths: func(t *testing.T, files []string) []path.Path {
 				ps := make([]path.Path, 0, len(files))
@@ -187,17 +171,9 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
 		{
 			name:  "multiple reasons, multiple file",
 			manID: "multi multi",
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason(tid, ro, path.ExchangeService, path.EmailCategory),
+				kopia.NewReason(tid, ro, path.ExchangeService, path.ContactsCategory),
 			},
 			expectPaths: func(t *testing.T, files []string) []path.Path {
 				ps := make([]path.Path, 0, len(files))
@@ -243,17 +219,13 @@ func buildReasons(
 	ro string,
 	service path.ServiceType,
 	cats ...path.CategoryType,
-) []kopia.Reason {
-	var reasons []kopia.Reason
+) []kopia.Reasoner {
+	var reasons []kopia.Reasoner
 
 	for _, cat := range cats {
 		reasons = append(
 			reasons,
-			kopia.Reason{
-				ResourceOwner: ro,
-				Service:       service,
-				Category:      cat,
-			})
+			kopia.NewReason("", ro, service, cat))
 	}
 
 	return reasons
@@ -280,7 +252,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 		name      string
 		bf        *mockBackupFinder
 		rp        mockRestoreProducer
-		reasons   []kopia.Reason
+		reasons   []kopia.Reasoner
 		getMeta   bool
 		assertErr assert.ErrorAssertionFunc
 		assertB   assert.BoolAssertionFunc
@@ -291,7 +263,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 		{
 			name:      "don't get metadata, no mans",
 			rp:        mockRestoreProducer{},
-			reasons:   []kopia.Reason{},
+			reasons:   []kopia.Reasoner{},
 			getMeta:   false,
 			assertErr: assert.NoError,
 			assertB:   assert.False,
@@ -308,12 +280,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 				},
 			},
 			rp: mockRestoreProducer{},
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory),
 			},
 			getMeta:   false,
 			assertErr: assert.NoError,
@@ -333,12 +301,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 				},
 			},
 			rp: mockRestoreProducer{},
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.NoError,
@@ -365,17 +329,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 					"id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id1"}}},
 				},
 			},
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory),
+				kopia.NewReason("", ro, path.ExchangeService, path.ContactsCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.NoError,
@@ -421,12 +377,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 					"id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id2"}}},
 				},
 			},
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.NoError,
@@ -454,12 +406,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 					"id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id2"}}},
 				},
 			},
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.NoError,
@@ -480,12 +428,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
 				},
 			},
 			rp: mockRestoreProducer{err: assert.AnError},
-			reasons: []kopia.Reason{
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.EmailCategory,
-				},
+			reasons: []kopia.Reasoner{
+				kopia.NewReason("", ro, path.ExchangeService, path.EmailCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.Error,
@@ -588,24 +532,24 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 		}
 	}
 
-	emailReason := kopia.Reason{
-		ResourceOwner: ro,
-		Service:       path.ExchangeService,
-		Category:      path.EmailCategory,
-	}
+	emailReason := kopia.NewReason(
+		"",
+		ro,
+		path.ExchangeService,
+		path.EmailCategory)
 
-	fbEmailReason := kopia.Reason{
-		ResourceOwner: fbro,
-		Service:       path.ExchangeService,
-		Category:      path.EmailCategory,
-	}
+	fbEmailReason := kopia.NewReason(
+		"",
+		fbro,
+		path.ExchangeService,
+		path.EmailCategory)
 
 	table := []struct {
 		name            string
 		bf              *mockBackupFinder
 		rp              mockRestoreProducer
-		reasons         []kopia.Reason
-		fallbackReasons []kopia.Reason
+		reasons         []kopia.Reasoner
+		fallbackReasons []kopia.Reasoner
 		getMeta         bool
 		assertErr       assert.ErrorAssertionFunc
 		assertB         assert.BoolAssertionFunc
@@ -624,7 +568,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 				},
 			},
 			rp:              mockRestoreProducer{},
-			fallbackReasons: []kopia.Reason{fbEmailReason},
+			fallbackReasons: []kopia.Reasoner{fbEmailReason},
 			getMeta:         false,
 			assertErr:       assert.NoError,
 			assertB:         assert.False,
@@ -649,7 +593,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}},
 				},
 			},
-			fallbackReasons: []kopia.Reason{fbEmailReason},
+			fallbackReasons: []kopia.Reasoner{fbEmailReason},
 			getMeta:         true,
 			assertErr:       assert.NoError,
 			assertB:         assert.True,
@@ -680,8 +624,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}},
 				},
 			},
-			reasons:         []kopia.Reason{emailReason},
-			fallbackReasons: []kopia.Reason{fbEmailReason},
+			reasons:         []kopia.Reasoner{emailReason},
+			fallbackReasons: []kopia.Reasoner{fbEmailReason},
 			getMeta:         true,
 			assertErr:       assert.NoError,
 			assertB:         assert.True,
@@ -708,8 +652,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id2"}}},
 				},
 			},
-			reasons:         []kopia.Reason{emailReason},
-			fallbackReasons: []kopia.Reason{fbEmailReason},
+			reasons:         []kopia.Reasoner{emailReason},
+			fallbackReasons: []kopia.Reasoner{fbEmailReason},
 			getMeta:         true,
 			assertErr:       assert.NoError,
 			assertB:         assert.True,
@@ -744,8 +688,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id2"}}},
 				},
 			},
-			reasons:         []kopia.Reason{emailReason},
-			fallbackReasons: []kopia.Reason{fbEmailReason},
+			reasons:         []kopia.Reasoner{emailReason},
+			fallbackReasons: []kopia.Reasoner{fbEmailReason},
 			getMeta:         true,
 			assertErr:       assert.NoError,
 			assertB:         assert.True,
@@ -776,8 +720,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}},
 				},
 			},
-			reasons:         []kopia.Reason{emailReason},
-			fallbackReasons: []kopia.Reason{fbEmailReason},
+			reasons:         []kopia.Reasoner{emailReason},
+			fallbackReasons: []kopia.Reasoner{fbEmailReason},
 			getMeta:         true,
 			assertErr:       assert.NoError,
 			assertB:         assert.True,
@@ -808,8 +752,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id2": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id2"}}},
 				},
 			},
-			reasons:         []kopia.Reason{emailReason},
-			fallbackReasons: []kopia.Reason{fbEmailReason},
+			reasons:         []kopia.Reasoner{emailReason},
+			fallbackReasons: []kopia.Reasoner{fbEmailReason},
 			getMeta:         true,
 			assertErr:       assert.NoError,
 			assertB:         assert.True,
@@ -838,21 +782,13 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}},
 				},
 			},
-			reasons: []kopia.Reason{
+			reasons: []kopia.Reasoner{
 				emailReason,
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+				kopia.NewReason("", ro, path.ExchangeService, path.ContactsCategory),
 			},
-			fallbackReasons: []kopia.Reason{
+			fallbackReasons: []kopia.Reasoner{
 				fbEmailReason,
-				{
-					ResourceOwner: fbro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+				kopia.NewReason("", fbro, path.ExchangeService, path.ContactsCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.NoError,
@@ -882,13 +818,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}},
 				},
 			},
-			reasons: []kopia.Reason{emailReason},
-			fallbackReasons: []kopia.Reason{
-				{
-					ResourceOwner: fbro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+			reasons: []kopia.Reasoner{emailReason},
+			fallbackReasons: []kopia.Reasoner{
+				kopia.NewReason("", fbro, path.ExchangeService, path.ContactsCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.NoError,
@@ -921,21 +853,13 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb
 					"fb_id1": {data.NoFetchRestoreCollection{Collection: mockColl{id: "fb_id1"}}},
 				},
 			},
-			reasons: []kopia.Reason{
+			reasons: []kopia.Reasoner{
 				emailReason,
-				{
-					ResourceOwner: ro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+				kopia.NewReason("", ro, path.ExchangeService, path.ContactsCategory),
 			},
-			fallbackReasons: []kopia.Reason{
+			fallbackReasons: []kopia.Reasoner{
 				fbEmailReason,
-				{
-					ResourceOwner: fbro,
-					Service:       path.ExchangeService,
-					Category:      path.ContactsCategory,
-				},
+				kopia.NewReason("", fbro, path.ExchangeService, path.ContactsCategory),
 			},
 			getMeta:   true,
 			assertErr: assert.NoError,
@@ -278,7 +278,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 	creds, err := acct.M365Config()
 	require.NoError(t, err, clues.ToCore(err))
 
-	ac, err := api.NewClient(creds)
+	ac, err := api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	// generate 3 new folders with two items each.
@@ -242,13 +242,7 @@ func checkBackupIsInManifests(
 	for _, category := range categories {
 		t.Run(category.String(), func(t *testing.T) {
 			var (
-				reasons = []kopia.Reason{
-					{
-						ResourceOwner: resourceOwner,
-						Service:       sel.PathService(),
-						Category:      category,
-					},
-				}
+				r     = kopia.NewReason("", resourceOwner, sel.PathService(), category)
 				tags  = map[string]string{kopia.TagBackupCategory: ""}
 				found bool
 			)
@@ -256,7 +250,7 @@ func checkBackupIsInManifests(
 			bf, err := kw.NewBaseFinder(sw)
 			require.NoError(t, err, clues.ToCore(err))
 
-			mans := bf.FindBases(ctx, reasons, tags)
+			mans := bf.FindBases(ctx, []kopia.Reasoner{r}, tags)
 			for _, man := range mans.MergeBases() {
 				bID, ok := man.GetTag(kopia.TagBackupID)
 				if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) {
@@ -591,7 +585,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
 	creds, err := a.M365Config()
 	require.NoError(t, err, clues.ToCore(err))
 
-	its.ac, err = api.NewClient(creds)
+	its.ac, err = api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	its.gockAC, err = mock.NewClient(creds)
@@ -234,6 +234,7 @@ func write(
 	backupStats, _, _, err := bup.ConsumeBackupCollections(
 		ctx,
 		nil,
+		nil,
 		dbcs,
 		prefixmatcher.NopReader[map[string]struct{}](),
 		nil,
@@ -112,7 +112,7 @@ func ReadTestConfig() (map[string]string, error) {
 	testEnv := map[string]string{}
 	fallbackTo(testEnv, TestCfgStorageProvider, vpr.GetString(TestCfgStorageProvider))
 	fallbackTo(testEnv, TestCfgAccountProvider, vpr.GetString(TestCfgAccountProvider))
-	fallbackTo(testEnv, TestCfgBucket, os.Getenv("S3_BUCKET"), vpr.GetString(TestCfgBucket), "test-corso-repo-init")
+	fallbackTo(testEnv, TestCfgBucket, os.Getenv("S3_BUCKET"), vpr.GetString(TestCfgBucket))
 	fallbackTo(testEnv, TestCfgEndpoint, vpr.GetString(TestCfgEndpoint), "s3.amazonaws.com")
 	fallbackTo(testEnv, TestCfgPrefix, vpr.GetString(TestCfgPrefix))
 	fallbackTo(testEnv, TestCfgAzureTenantID, os.Getenv(account.AzureTenantID), vpr.GetString(TestCfgAzureTenantID))
@@ -7,14 +7,17 @@ import (
 
 // Options holds the optional configurations for a process
 type Options struct {
+	// DeltaPageSize controls the quantity of items fetched in each page
+	// during multi-page queries, such as graph api delta endpoints.
+	DeltaPageSize        int32                              `json:"deltaPageSize"`
 	DisableMetrics       bool                               `json:"disableMetrics"`
 	FailureHandling      FailurePolicy                      `json:"failureHandling"`
+	ItemExtensionFactory []extensions.CreateItemExtensioner `json:"-"`
+	Parallelism          Parallelism                        `json:"parallelism"`
+	Repo                 repository.Options                 `json:"repo"`
 	RestorePermissions   bool                               `json:"restorePermissions"`
 	SkipReduce           bool                               `json:"skipReduce"`
 	ToggleFeatures       Toggles                            `json:"toggleFeatures"`
-	Parallelism          Parallelism                        `json:"parallelism"`
-	Repo                 repository.Options                 `json:"repo"`
-	ItemExtensionFactory []extensions.CreateItemExtensioner `json:"-"`
 }
 
 type Parallelism struct {
@@ -39,6 +42,7 @@ const (
 func Defaults() Options {
 	return Options{
 		FailureHandling: FailAfterRecovery,
+		DeltaPageSize:   500,
 		ToggleFeatures:  Toggles{},
 		Parallelism: Parallelism{
 			CollectionBuffer: 4,
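A caller-side sketch of the new DeltaPageSize option (illustrative; only Defaults and the 500 default come from this commit, the override and the in-scope creds value are assumptions):

    // start from the defaults introduced above, then tune the page size
    opts := control.Defaults() // DeltaPageSize: 500
    opts.DeltaPageSize = 200   // hypothetical override for smaller delta pages

    // api.NewClient (changed later in this commit) clamps values < 1 or
    // > maxDeltaPageSize to maxDeltaPageSize before storing the options.
    cli, err := api.NewClient(creds, opts)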
@@ -2,13 +2,17 @@ package control
 
 import (
 	"context"
+	"encoding/json"
+	"fmt"
 	"strings"
 
+	"github.com/alcionai/clues"
 	"golang.org/x/exp/maps"
 	"golang.org/x/exp/slices"
 
 	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/pkg/logger"
+	"github.com/alcionai/corso/src/pkg/path"
 )
 
 const (
@@ -39,24 +43,24 @@ const RootLocation = "/"
 type RestoreConfig struct {
 	// Defines the per-item collision handling policy.
 	// Defaults to Skip.
-	OnCollision CollisionPolicy
+	OnCollision CollisionPolicy `json:"onCollision"`
 
 	// ProtectedResource specifies which resource the data will be restored to.
 	// If empty, restores to the same resource that was backed up.
 	// Defaults to empty.
-	ProtectedResource string
+	ProtectedResource string `json:"protectedResource"`
 
 	// Location specifies the container into which the data will be restored.
 	// Only accepts container names, does not accept IDs.
 	// If empty or "/", data will get restored in place, beginning at the root.
 	// Defaults to "Corso_Restore_<current_dttm>"
-	Location string
+	Location string `json:"location"`
 
 	// Drive specifies the name of the drive into which the data will be
 	// restored. If empty, data is restored to the same drive that was backed
 	// up.
	// Defaults to empty.
-	Drive string
+	Drive string `json:"drive"`
 }
 
 func DefaultRestoreConfig(timeFormat dttm.TimeFormat) RestoreConfig {
@@ -90,3 +94,58 @@ func EnsureRestoreConfigDefaults(
 
 	return rc
 }
+
+// ---------------------------------------------------------------------------
+// pii control
+// ---------------------------------------------------------------------------
+
+var (
+	// interface compliance required for handling PII
+	_ clues.Concealer = &RestoreConfig{}
+	_ fmt.Stringer    = &RestoreConfig{}
+
+	// interface compliance for the observe package to display
+	// values without concealing PII.
+	_ clues.PlainStringer = &RestoreConfig{}
+)
+
+func (rc RestoreConfig) marshal() string {
+	bs, err := json.Marshal(rc)
+	if err != nil {
+		return "err marshalling"
+	}
+
+	return string(bs)
+}
+
+func (rc RestoreConfig) concealed() RestoreConfig {
+	return RestoreConfig{
+		OnCollision:       rc.OnCollision,
+		ProtectedResource: clues.Hide(rc.ProtectedResource).Conceal(),
+		Location:          path.LoggableDir(rc.Location),
+		Drive:             clues.Hide(rc.Drive).Conceal(),
+	}
+}
+
+// Conceal produces a concealed representation of the config, suitable for
+// logging, storing in errors, and other output.
+func (rc RestoreConfig) Conceal() string {
+	return rc.concealed().marshal()
+}
+
+// Format produces a concealed representation of the config, even when
+// used within a PrintF, suitable for logging, storing in errors,
+// and other output.
+func (rc RestoreConfig) Format(fs fmt.State, _ rune) {
+	fmt.Fprint(fs, rc.concealed())
+}
+
+// String returns a plain text version of the restoreConfig.
+func (rc RestoreConfig) String() string {
+	return rc.PlainString()
+}
+
+// PlainString returns an unescaped, unmodified string of the restore configuration.
+func (rc RestoreConfig) PlainString() string {
+	return rc.marshal()
+}
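A usage sketch for the new Concealer implementation (illustrative, not part of the commit; the field values and the Skip policy constant name are assumptions):

    rc := control.RestoreConfig{
        OnCollision:       control.Skip,       // assumed policy constant
        ProtectedResource: "user@example.com", // hypothetical values
        Location:          "Corso_Restore_Demo",
    }

    // Conceal hides the resource and drive; PlainString does not.
    logger.Ctx(ctx).Infow("restoring", "restore_config", rc.Conceal())
    fmt.Println(rc.PlainString())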
@@ -8,6 +8,7 @@ import (
 
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
@@ -36,11 +37,13 @@ type Client struct {
 	// arbitrary urls instead of constructing queries using the
 	// graph api client.
 	Requester graph.Requester
 
+	options control.Options
 }
 
 // NewClient produces a new exchange api client. Must be used in
 // place of creating an ad-hoc client struct.
-func NewClient(creds account.M365Config) (Client, error) {
+func NewClient(creds account.M365Config, co control.Options) (Client, error) {
 	s, err := NewService(creds)
 	if err != nil {
 		return Client{}, err
@@ -53,7 +56,11 @@ func NewClient(creds account.M365Config) (Client, error) {
 
 	rqr := graph.NewNoTimeoutHTTPWrapper()
 
-	return Client{creds, s, li, rqr}, nil
+	if co.DeltaPageSize < 1 || co.DeltaPageSize > maxDeltaPageSize {
+		co.DeltaPageSize = maxDeltaPageSize
+	}
+
+	return Client{creds, s, li, rqr, co}, nil
 }
 
 // initConcurrencyLimit ensures that the graph concurrency limiter is
@@ -277,7 +277,7 @@ func (c Contacts) NewContactDeltaIDsPager(
 			Select: idAnd(parentFolderID),
 			// do NOT set Top. It limits the total items received.
 		},
-		Headers: newPreferHeaders(preferPageSize(maxDeltaPageSize), preferImmutableIDs(immutableIDs)),
+		Headers: newPreferHeaders(preferPageSize(c.options.DeltaPageSize), preferImmutableIDs(immutableIDs)),
 	}
 
 	var builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder
@@ -244,7 +244,7 @@ func (c Events) NewEventDeltaIDsPager(
 	immutableIDs bool,
 ) (itemIDPager, error) {
 	options := &users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration{
-		Headers: newPreferHeaders(preferPageSize(maxDeltaPageSize), preferImmutableIDs(immutableIDs)),
+		Headers: newPreferHeaders(preferPageSize(c.options.DeltaPageSize), preferImmutableIDs(immutableIDs)),
 		QueryParameters: &users.ItemCalendarsItemEventsDeltaRequestBuilderGetQueryParameters{
 			// do NOT set Top. It limits the total items received.
 		},
@@ -16,6 +16,7 @@ import (
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 	"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
 )
@@ -97,7 +98,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
 	creds, err := a.M365Config()
 	require.NoError(t, err, clues.ToCore(err))
 
-	its.ac, err = api.NewClient(creds)
+	its.ac, err = api.NewClient(creds, control.Defaults())
 	require.NoError(t, err, clues.ToCore(err))
 
 	its.gockAC, err = mock.NewClient(creds)
@@ -310,7 +310,7 @@ func (c Mail) NewMailDeltaIDsPager(
 			Select: idAnd("isRead"),
 			// do NOT set Top. It limits the total items received.
 		},
-		Headers: newPreferHeaders(preferPageSize(maxDeltaPageSize), preferImmutableIDs(immutableIDs)),
+		Headers: newPreferHeaders(preferPageSize(c.options.DeltaPageSize), preferImmutableIDs(immutableIDs)),
 	}
 
 	var builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder
@@ -225,13 +225,13 @@ func ValidateSite(item models.Siteable) error {
 
 	wURL := ptr.Val(item.GetWebUrl())
 	if len(wURL) == 0 {
-		return clues.New("missing webURL").With("site_id", id) // TODO: pii
+		return clues.New("missing webURL").With("site_id", clues.Hide(id))
 	}
 
 	// personal (ie: oneDrive) sites have to be filtered out server-side.
 	if strings.Contains(wURL, PersonalSitePath) {
 		return clues.Stack(ErrKnownSkippableCase).
-			With("site_id", id, "site_web_url", wURL) // TODO: pii
+			With("site_id", clues.Hide(id), "site_web_url", clues.Hide(wURL))
 	}
 
 	name := ptr.Val(item.GetDisplayName())
@@ -239,10 +239,10 @@ func ValidateSite(item models.Siteable) error {
 		// the built-in site at "https://{tenant-domain}/search" never has a name.
 		if strings.HasSuffix(wURL, "/search") {
 			return clues.Stack(ErrKnownSkippableCase).
-				With("site_id", id, "site_web_url", wURL) // TODO: pii
+				With("site_id", clues.Hide(id), "site_web_url", clues.Hide(wURL))
 		}
 
-		return clues.New("missing site display name").With("site_id", id)
+		return clues.New("missing site display name").With("site_id", clues.Hide(id))
 	}
 
 	return nil
@@ -10,6 +10,7 @@ import (
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
@@ -328,7 +329,7 @@ func makeAC(
 		return api.Client{}, clues.Wrap(err, "getting m365 account creds")
 	}
 
-	cli, err := api.NewClient(creds)
+	cli, err := api.NewClient(creds, control.Defaults())
 	if err != nil {
 		return api.Client{}, clues.Wrap(err, "constructing api client")
 	}
175
website/blog/2023-07-24-multi-tenant-backup-with-corso.md
Normal file
175
website/blog/2023-07-24-multi-tenant-backup-with-corso.md
Normal file
@ -0,0 +1,175 @@
|
|||||||
|
---
|
||||||
|
slug: multi-tenant-backup-with-corso
|
||||||
|
title: "Using Corso to Build a Self-Hosted Multi-Tenant Office 365 Backup Solution"
|
||||||
|
description: ""
|
||||||
|
authors:
|
||||||
|
- name: meuchels
|
||||||
|
title: Corso Community Member, IT Lead
|
||||||
|
url: https://github.com/meuchels
|
||||||
|
image_url: https://avatars.githubusercontent.com/u/77171293?v=4
|
||||||
|
tags: [corso, microsoft 365, backups, msp, multi-tenant]
|
||||||
|
date: 2023-07-24
|
||||||
|
image: ./images/data-center.jpg
|
||||||
|
---
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
This community-contributed blog post shows how MSPs in the community are using Corso to build out a multi-tenant backup
|
||||||
|
solution for their Microsoft 365 customers. If you have questions, come find the author (or us) on
|
||||||
|
[Discord](https://www.alcion.ai/discord).
|
||||||
|
|
||||||
|
<!-- truncate -->
|
||||||
|
|
||||||
|
First of all, I offer a fully managed backup solution. My clients have no access to the backup software or the data. I
|
||||||
|
require them to request recovery in a ticket. For my use case I have a self-hosted instance of MinIO that I won't be
|
||||||
|
going over but there is [another blog post on it](./2023-2-4-where-to-store-corso.md#local-s3-testing). I will show the
|
||||||
|
layout and an example of how to backup emails using the exchange option in Corso.
|
||||||
|
|
||||||
|
## Organizing the file structure on your storage
|
||||||
|
|
||||||
|
I wanted my S3 bucket to be laid out in the following fashion utilizing 1 bucket with prefixes for the tenants. For now,
|
||||||
|
all I did is create a bucket with access to a user for corso. While it's possible to use a single bucket and use prefix
|
||||||
|
paths per tenant within it, I didn't do that in my setup. The will be generated later with the backup initialization.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
BUCKET
|
||||||
|
tenant1-exchange
|
||||||
|
tenant1-onedrive
|
||||||
|
tenant1-sharepoint
|
||||||
|
tenant2-exchange
|
||||||
|
tenant2-onedrive
|
||||||
|
tenant2-sharepoint
|
||||||
|
```
|
||||||
|
|
||||||
|
If I don’t backup a particular service for a client, it will be clear by looking at whether the bucket exists or not.
|
||||||
|
|
||||||
|
I have a short name for each tenant to differentiate them.
|
||||||
|
|
||||||
|
## The backup compute server layout

I use Ubuntu Server for this task. In my setup, everything is done as the root user. I have put the corso executable
in `/opt/corso/` and will be building everything under there. Here is the folder layout before I go into usage.

```bash
# For logs
/opt/corso/logs

# For config files
/opt/corso/toml

# Root of the scripts folder
/opt/corso/scripts

# For building out the environment loaders
/opt/corso/scripts/environments

# For building out the backup scripts
/opt/corso/scripts/back-available

# For adding a link to the backups that will be run
/opt/corso/scripts/back-active
```
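
If it helps, the whole layout can be created in one shot; a minimal sketch, assuming a bash root shell:

```bash
# Create the full corso folder layout (bash brace expansion).
mkdir -p /opt/corso/{logs,toml,scripts/{environments,back-available,back-active}}
```
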
## The environment files

For [configuration](../../docs/setup/configuration/), create an environment file
`/opt/corso/scripts/environments/blank-exchange` with the following content as a template. You can copy this template
to `<tenantshortname>-exchange` in the same folder to set up a client's exchange backup environment.

```bash
#####################################
# EDIT THIS SECTION TO MEET YOUR NEEDS
#####################################

# a short name for your tenant, used to lay out storage
export tenantshortname=""

# your tenant info from the app setup on O365
export AZURE_TENANT_ID=""
export AZURE_CLIENT_ID=""
export AZURE_CLIENT_SECRET=""

# your credentials for your s3 storage
export AWS_ACCESS_KEY_ID="<S3-STORAGE-USERNAME>"
export AWS_SECRET_ACCESS_KEY="<S3-STORAGE-PASSWORD>"

# the encryption passphrase for your backups
export CORSO_PASSPHRASE="<ENCRYPTION-PASSWORD>"

# your s3 storage endpoint and bucket
export s3endpoint="<YOUR-S3-STORAGE-SERVER>"
export bucket="<YOUR-BUCKET>"

####################################
# END EDIT
####################################

export configfile=/opt/corso/toml/${tenantshortname}-exchange.toml
```
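
As an example, assuming a hypothetical tenant short name of `tenant1`:

```bash
# Copy the template, then fill in the tenant-specific values.
cp /opt/corso/scripts/environments/blank-exchange \
   /opt/corso/scripts/environments/tenant1-exchange
```
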
## The backup scripts

Create a backup script `/opt/corso/scripts/back-available/blank-exchange` with the following content as an exchange
backup template. Copy it to `<tenantshortname>-exchange` in the same directory to create a tenant's backup script.

```bash
#!/bin/bash

############## Begin Edit ###

# change blank to the tenant short name
source /opt/corso/scripts/environments/blank-exchange

############## End Edit ###

# create runtime variables
logfilename="/opt/corso/logs/${tenantshortname}-exchange/$(date +'%Y-%m-%d-%H%M%S').log"
runcorso="/opt/corso/corso"

# ensure the per-tenant log folder exists
mkdir -p "$(dirname "$logfilename")"

# init bucket and connect to the repo
$runcorso repo init s3 --bucket $bucket --prefix ${tenantshortname}_exchange --endpoint $s3endpoint \
  --log-file $logfilename --config-file $configfile --hide-progress
$runcorso repo connect s3 --bucket $bucket --log-file $logfilename --config-file $configfile --hide-progress

# run backup
$runcorso backup create exchange --mailbox '*' --log-file $logfilename --config-file $configfile --hide-progress
```
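
A sketch of that copy step, again assuming the hypothetical `tenant1` short name. The runner shown later executes these
scripts directly, so mark the copy executable:

```bash
# Copy the backup script template for the tenant and make it runnable.
cp /opt/corso/scripts/back-available/blank-exchange \
   /opt/corso/scripts/back-available/tenant1-exchange
chmod +x /opt/corso/scripts/back-available/tenant1-exchange
```
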
Use `back-available` as your working directory, and create a symbolic link in `/opt/corso/scripts/back-active/` to each
script you want to activate, as sketched below.
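
A minimal sketch of activating the hypothetical `tenant1` script:

```bash
# Link the script into back-active; only linked scripts get run.
ln -s /opt/corso/scripts/back-available/tenant1-exchange \
      /opt/corso/scripts/back-active/tenant1-exchange
```
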
## The backup runner

To fire it all off, I have a `backuprunner.sh` script that cycles through the `/opt/corso/scripts/back-active` folder;
it's scheduled with a `cron` job to run at whatever interval you choose. You can put it wherever you want, but I keep
it in the scripts folder as well so I know where everything is. Add your email address where indicated. This relies on
the Linux `mail` package, so you will have to accept email from it.

```bash
#!/bin/bash

# Directory containing the scripts
script_directory="/opt/corso/scripts/back-active"

# Email configuration
recipient="<YOUR-EMAIL-ADDRESS>"
subject_prefix="Backup Job: "

# Iterate over all scripts in the directory
for script_file in "$script_directory"/*; do
    # Run the script and capture the output
    output=$("$script_file")

    # Prepare email subject
    script_name=$(basename "$script_file")
    subject="$subject_prefix$script_name"

    # Send an email with the script output
    echo "$output" | mail -s "$subject" "$recipient"
done
```
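
For scheduling, a hypothetical crontab entry, assuming the runner lives at `/opt/corso/scripts/backuprunner.sh` and a
nightly run at 01:00 (adjust to your own interval):

```bash
# Install with `crontab -e` as root: run all active backups nightly at 01:00.
0 1 * * * /opt/corso/scripts/backuprunner.sh
```
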
Once your backups have completed, you can load a tenant's environment with
`source /opt/corso/scripts/environments/tenant-exchange` to load the variables and access that tenant's backups. Be
sure to specify the `--config-file` flag.

```bash
source /opt/corso/scripts/environments/tenant-exchange
/opt/corso/corso backup list exchange --config-file $configfile
```
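
From there, handling a recovery ticket follows the same pattern. A sketch, with `<backup-id>` taken from the list
output above; the exact restore flags you want may vary, so check the corso docs:

```bash
# Inspect what a backup contains, then restore from it.
/opt/corso/corso backup details exchange --backup <backup-id> --config-file $configfile
/opt/corso/corso restore exchange --backup <backup-id> --config-file $configfile
```
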
Don’t forget to back up your `/opt/corso` folder once in a while to save your scripts!
BIN	website/blog/images/data-center.jpg	Normal file	(binary file not shown; 226 KiB)
@@ -21,6 +21,16 @@ You can run the linter manually or with the `Makefile` in the repository. Runnin
 the `Makefile` will also ensure you have the proper version of golangci-lint
 installed.

+### Running the actions linter
+
+Installation:
+
+```sh
+go install github.com/rhysd/actionlint/cmd/actionlint@latest
+```
+
+[Instructions for running locally.](https://github.com/rhysd/actionlint/blob/main/docs/usage.md)
+
 ### Running with the `Makefile`

 There’s a `Makefile` in `corso/src` that will automatically check if the proper
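
Once installed, running the linter from the repository root should pick up the workflow files automatically; a sketch,
assuming actionlint's default project discovery:

```sh
# From the repo root; actionlint finds .github/workflows on its own.
actionlint
```
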
28	website/package-lock.json	generated

@@ -24,7 +24,7 @@
 "prism-react-renderer": "^1.3.5",
 "react": "^17.0.2",
 "react-dom": "^17.0.2",
-"sass": "^1.64.0",
+"sass": "^1.64.1",
 "tiny-slider": "^2.9.4",
 "tw-elements": "^1.0.0-alpha13",
 "wow.js": "^1.2.2"
@@ -33,7 +33,7 @@
 "@docusaurus/module-type-aliases": "2.4.1",
 "@iconify/react": "^4.1.1",
 "autoprefixer": "^10.4.14",
-"postcss": "^8.4.26",
+"postcss": "^8.4.27",
 "tailwindcss": "^3.3.3"
 }
 },
@@ -10656,9 +10656,9 @@
 }
 },
 "node_modules/postcss": {
-"version": "8.4.26",
-"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz",
-"integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==",
+"version": "8.4.27",
+"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz",
+"integrity": "sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==",
 "funding": [
 {
 "type": "opencollective",
@@ -12571,9 +12571,9 @@
 "license": "MIT"
 },
 "node_modules/sass": {
-"version": "1.64.0",
-"resolved": "https://registry.npmjs.org/sass/-/sass-1.64.0.tgz",
-"integrity": "sha512-m7YtAGmQta9uANIUJwXesAJMSncqH+3INc8kdVXs6eV6GUC8Qu2IYKQSN8PRLgiQfpca697G94klm2leYMxSHw==",
+"version": "1.64.1",
+"resolved": "https://registry.npmjs.org/sass/-/sass-1.64.1.tgz",
+"integrity": "sha512-16rRACSOFEE8VN7SCgBu1MpYCyN7urj9At898tyzdXFhC+a+yOX5dXwAR7L8/IdPJ1NB8OYoXmD55DM30B2kEQ==",
 "dependencies": {
 "chokidar": ">=3.0.0 <4.0.0",
 "immutable": "^4.0.0",
@@ -22569,9 +22569,9 @@
 "integrity": "sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ=="
 },
 "postcss": {
-"version": "8.4.26",
-"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz",
-"integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==",
+"version": "8.4.27",
+"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz",
+"integrity": "sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==",
 "requires": {
 "nanoid": "^3.3.6",
 "picocolors": "^1.0.0",
@@ -23802,9 +23802,9 @@
 "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
 },
 "sass": {
-"version": "1.64.0",
-"resolved": "https://registry.npmjs.org/sass/-/sass-1.64.0.tgz",
-"integrity": "sha512-m7YtAGmQta9uANIUJwXesAJMSncqH+3INc8kdVXs6eV6GUC8Qu2IYKQSN8PRLgiQfpca697G94klm2leYMxSHw==",
+"version": "1.64.1",
+"resolved": "https://registry.npmjs.org/sass/-/sass-1.64.1.tgz",
+"integrity": "sha512-16rRACSOFEE8VN7SCgBu1MpYCyN7urj9At898tyzdXFhC+a+yOX5dXwAR7L8/IdPJ1NB8OYoXmD55DM30B2kEQ==",
 "requires": {
 "chokidar": ">=3.0.0 <4.0.0",
 "immutable": "^4.0.0",

@@ -30,7 +30,7 @@
 "prism-react-renderer": "^1.3.5",
 "react": "^17.0.2",
 "react-dom": "^17.0.2",
-"sass": "^1.64.0",
+"sass": "^1.64.1",
 "tiny-slider": "^2.9.4",
 "tw-elements": "^1.0.0-alpha13",
 "wow.js": "^1.2.2"
@@ -39,7 +39,7 @@
 "@docusaurus/module-type-aliases": "2.4.1",
 "@iconify/react": "^4.1.1",
 "autoprefixer": "^10.4.14",
-"postcss": "^8.4.26",
+"postcss": "^8.4.27",
 "tailwindcss": "^3.3.3"
 },
 "browserslist": {