Merge branch 'teamsDiscovery' of https://github.com/alcionai/corso into groupAPIs

neha-Gupta1 2023-08-02 17:50:44 +05:30
commit fadd5ec5df
178 changed files with 9143 additions and 3463 deletions

View File

@@ -24,6 +24,10 @@ inputs:
log-dir:
description: Folder to store test log files
required: true
on-collision:
description: Value for the --collisions flag
required: false
default: "replace"
outputs:
backup-id:
@@ -57,6 +61,7 @@ runs:
./corso restore '${{ inputs.service }}' \
--no-stats \
--hide-progress \
--collisions ${{ inputs.on-collision }} \
${{ inputs.restore-args }} \
--backup '${{ steps.backup.outputs.result }}' \
2>&1 |
@@ -67,12 +72,78 @@ runs:
cat /tmp/corsologs
- name: Check restore ${{ inputs.service }} ${{ inputs.kind }}
shell: bash
working-directory: src
env:
SANITY_TEST_KIND: restore
SANITY_TEST_FOLDER: ${{ steps.restore.outputs.result }}
SANITY_TEST_SERVICE: ${{ inputs.service }}
TEST_DATA: ${{ inputs.test-folder }}
BASE_BACKUP: ${{ inputs.base-backup }}
run: |
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
./sanity-test
- name: Export ${{ inputs.service }} ${{ inputs.kind }}
id: export
shell: bash
working-directory: src
if: ${{ inputs.service == 'onedrive' || inputs.service == 'sharepoint' }}
run: |
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
./corso export '${{ inputs.service }}' \
/tmp/export-${{ inputs.service }}-${{inputs.kind }} \
--no-stats \
--hide-progress \
${{ inputs.export-args }} \
--backup '${{ steps.backup.outputs.result }}'
cat /tmp/corsologs
- name: Check export ${{ inputs.service }} ${{ inputs.kind }}
shell: bash
working-directory: src
if: ${{ inputs.service == 'onedrive' || inputs.service == 'sharepoint' }}
env:
SANITY_TEST_KIND: export
SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}
SANITY_TEST_SERVICE: ${{ inputs.service }}
TEST_DATA: ${{ inputs.test-folder }}
BASE_BACKUP: ${{ inputs.base-backup }}
run: |
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
./sanity-test
- name: Export archive ${{ inputs.service }} ${{ inputs.kind }}
id: export-archive
shell: bash
working-directory: src
if: ${{ inputs.service == 'onedrive' }} # Archive export currently runs only for OneDrive
run: |
set -euo pipefail
CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
./corso export '${{ inputs.service }}' \
/tmp/export-${{ inputs.service }}-${{inputs.kind }}-archive \
--no-stats \
--hide-progress \
--archive \
${{ inputs.export-args }} \
--backup '${{ steps.backup.outputs.result }}'
unzip /tmp/export-${{ inputs.service }}-${{inputs.kind }}-archive/*.zip \
-d /tmp/export-${{ inputs.service }}-${{inputs.kind }}-unzipped
cat /tmp/corsologs
- name: Check archive export ${{ inputs.service }} ${{ inputs.kind }}
shell: bash
working-directory: src
if: ${{ inputs.service == 'onedrive' }}
env:
SANITY_TEST_KIND: export
SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}-unzipped
SANITY_TEST_SERVICE: ${{ inputs.service }}
TEST_DATA: ${{ inputs.test-folder }}
BASE_BACKUP: ${{ inputs.base-backup }}
run: |

View File

@@ -19,7 +19,9 @@ inputs:
site:
description: Sharepoint site where data is to be purged.
libraries:
description: List of library names within the site where data is to be purged.
library-prefix:
description: List of library name prefixes within the site; matching libraries will be deleted entirely.
folder-prefix:
description: Name of the folder to be purged. If falsy, will purge the set of static, well known folders instead.
older-than:
@@ -51,8 +53,7 @@ runs:
AZURE_CLIENT_ID: ${{ inputs.azure-client-id }}
AZURE_CLIENT_SECRET: ${{ inputs.azure-client-secret }}
AZURE_TENANT_ID: ${{ inputs.azure-tenant-id }}
run: ./exchangePurge.ps1 -User ${{ inputs.user }} -FolderNamePurgeList PersonMetadata -FolderPrefixPurgeList "${{ inputs.folder-prefix }}".Split(",") -PurgeBeforeTimestamp ${{ inputs.older-than }}
- name: Reset retention for all mailboxes to 0
if: ${{ inputs.user == '' }}
@@ -61,8 +62,7 @@ runs:
env:
M365_TENANT_ADMIN_USER: ${{ inputs.m365-admin-user }}
M365_TENANT_ADMIN_PASSWORD: ${{ inputs.m365-admin-password }}
run: ./exchangeRetention.ps1
################################################################################################################
# OneDrive
@@ -89,5 +89,4 @@ runs:
env:
M365_TENANT_ADMIN_USER: ${{ inputs.m365-admin-user }}
M365_TENANT_ADMIN_PASSWORD: ${{ inputs.m365-admin-password }}
run: ./onedrivePurge.ps1 -Site ${{ inputs.site }} -LibraryNameList "${{ inputs.libraries }}".split(",") -FolderPrefixPurgeList ${{ inputs.folder-prefix }} -LibraryPrefixDeleteList ${{ inputs.library-prefix && inputs.library-prefix || '[]' }} -PurgeBeforeTimestamp ${{ inputs.older-than }}

View File

@@ -0,0 +1,58 @@
name: Send a message to slack
inputs:
msg:
description: The slack message text
slack_url:
description: passthrough for secrets.SLACK_WEBHOOK_URL
runs:
using: composite
steps:
- uses: actions/checkout@v3
- name: set github ref
shell: bash
run: |
echo "github_reference=${{ github.ref }}" >> $GITHUB_ENV
- name: trim github ref
shell: bash
run: |
echo "trimmed_ref=${github_reference#refs/}" >> $GITHUB_ENV
- name: build urls
shell: bash
run: |
echo "logurl=$(printf '<https://github.com/alcionai/corso/actions/runs/%s|[Action]>' ${{ github.run_id }})" >> $GITHUB_ENV
echo "commiturl=$(printf '<https://github.com/alcionai/corso/commit/%s|[Commit]>' ${{ github.sha }})" >> $GITHUB_ENV
echo "refurl=$(printf '<https://github.com/alcionai/corso/%s|[Ref]>' ${{ env.trimmed_ref }})" >> $GITHUB_ENV
- name: use url or blank val
shell: bash
run: |
echo "STEP=${{ github.action || '' }}" >> $GITHUB_ENV
echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV
echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV
echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV
echo "REF=${{ env.trimmed_ref && env.refurl || '-' }}" >> $GITHUB_ENV
- id: slack-message
uses: slackapi/slack-github-action@v1.24.0
env:
SLACK_WEBHOOK_URL: ${{ inputs.slack_url }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
with:
payload: |
{
"text": "${{ inputs.msg }} :: ${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "${{ inputs.msg }} :: ${{ env.JOB }} - ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
}
}
]
}

View File

@@ -12,18 +12,15 @@ jobs:
continue-on-error: true
strategy:
matrix:
user: [ CORSO_M365_TEST_USER_ID, CORSO_SECONDARY_M365_TEST_USER_ID, '' ]
steps:
- uses: actions/checkout@v3
# sets the maximum time to now-30m.
# CI tests have a 20 minute timeout.
# At 20 minutes ago, we should be safe from conflicts.
# The additional 10 minutes is just to be good citizens.
- name: Set purge boundary
run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV
- name: Purge CI-Produced Folders for Users
uses: ./.github/actions/purge-m365-data
@@ -37,34 +34,46 @@ jobs:
m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }}
m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }}
- name: Notify failure in slack
if: failure()
uses: ./.github/actions/slack-message
with:
msg: "[FAILED] ${{ vars[matrix.user] }} CI Cleanup"
slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}
Test-Site-Data-Cleanup:
environment: Testing
runs-on: ubuntu-latest
continue-on-error: true
strategy:
matrix:
site: [ CORSO_M365_TEST_SITE_URL ]
steps:
- uses: actions/checkout@v3
# sets the maximum time to now-30m.
# CI tests have a 20 minute timeout.
# At 20 minutes ago, we should be safe from conflicts.
# The additional 10 minutes is just to be good citizens.
- name: Set purge boundary
run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV
- name: Purge CI-Produced Folders for Sites
uses: ./.github/actions/purge-m365-data
with:
site: ${{ vars[matrix.site] }}
folder-prefix: ${{ vars.CORSO_M365_TEST_PREFIXES }}
libraries: ${{ vars.CORSO_M365_TEST_SITE_LIBRARIES }}
library-prefix: ${{ vars.CORSO_M365_TEST_PREFIXES }}
older-than: ${{ env.HALF_HOUR_AGO }}
azure-client-id: ${{ secrets.CLIENT_ID }}
azure-client-secret: ${{ secrets.CLIENT_SECRET }}
azure-tenant-id: ${{ secrets.TENANT_ID }}
m365-admin-user: ${{ secrets.M365_TENANT_ADMIN_USER }}
m365-admin-password: ${{ secrets.M365_TENANT_ADMIN_PASSWORD }}
- name: Notify failure in slack
if: failure()
uses: ./.github/actions/slack-message
with:
msg: "[FAILED] ${{ vars[matrix.site] }} CI Cleanup"
slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

View File

@@ -277,33 +277,9 @@ jobs:
if-no-files-found: error
retention-days: 14
- name: Notify failure in slack
if: failure()
uses: ./.github/actions/slack-message
with:
msg: "[FAILED] Longevity Test"
slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

View File

@@ -94,6 +94,7 @@ jobs:
CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }}
CORSO_LOG_FILE: ${{ github.workspace }}/testlog/run-nightly.log
LOG_GRAPH_REQUESTS: true
S3_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }}
run: |
set -euo pipefail
go test \
@@ -119,33 +120,9 @@ jobs:
if-no-files-found: error
retention-days: 14
- name: Notify failure in slack
if: failure()
uses: ./.github/actions/slack-message
with:
msg: "[FAILED] Nightly Checks"
slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

View File

@@ -333,33 +333,9 @@ jobs:
if-no-files-found: error
retention-days: 14
- name: Notify failure in slack
if: failure()
uses: ./.github/actions/slack-message
with:
msg: "[FAILED] Sanity Tests"
slack_url: ${{ secrets.SLACK_WEBHOOK_URL }}

View File

@@ -7,6 +7,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html)
## [Unreleased] (beta)
### Added
- Restore commands now accept an optional resource override with the `--to-resource` flag. This allows restores to recreate backup data within different mailboxes, sites, and users.
- Added an option to export data from OneDrive and SharePoint backups as individual files or as a single zip file.
### Fixed
- SharePoint document libraries deleted after the last backup can now be restored.
- Restore now requires the protected resource to have access to the service being restored.
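As an illustration of the new capabilities, invocations might look like the following (the backup ID and mailbox address are hypothetical):

corso restore exchange --backup 1234abcd-12ab-cd34-56de-1234abcd --to-resource adele@contoso.onmicrosoft.com
corso export onedrive ./my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd --archive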
## [v0.11.1] (beta) - 2023-07-20
### Fixed
- Allow repo connect to succeed when a `corso.toml` file was not provided but configuration is specified using environment variables and flags.
@@ -21,6 +33,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html)
### Fixed
- Return a ServiceNotEnabled error when a tenant has no active SharePoint license.
- Added retries for http/2 stream connection failures when downloading large item content.
- SharePoint document libraries that were deleted after the last backup can now be restored.
### Known issues
- If a link share is created for an item with inheritance disabled
@@ -328,7 +341,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html)
- Miscellaneous
- Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))
[Unreleased]: https://github.com/alcionai/corso/compare/v0.11.1...HEAD
[v0.11.1]: https://github.com/alcionai/corso/compare/v0.11.0...v0.11.1
[v0.11.0]: https://github.com/alcionai/corso/compare/v0.10.0...v0.11.0
[v0.10.0]: https://github.com/alcionai/corso/compare/v0.9.0...v0.10.0
[v0.9.0]: https://github.com/alcionai/corso/compare/v0.8.1...v0.9.0

View File

@@ -3,14 +3,20 @@ run:
linters:
enable:
- errcheck
- forbidigo
- gci
- gofmt
- gofumpt
- gosimple
- govet
- ineffassign
- lll
- loggercheck
- misspell
- revive
- unused
- usestdlibvars
- wsl
disable:

View File

@@ -94,6 +94,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
flags.AddDisableDeltaFlag(c)
flags.AddEnableImmutableIDFlag(c)
flags.AddDisableConcurrencyLimiterFlag(c)
flags.AddDeltaPageSizeFlag(c)
case listCommand:
c, fs = utils.AddCommand(cmd, exchangeListCmd())
@@ -175,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
sel := exchangeBackupCreateSelectors(flags.UserFV, flags.CategoryDataFV)
ins, err := utils.UsersMap(ctx, *acct, utils.Control(), fault.New(true))
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users"))
}

View File

@@ -37,11 +37,11 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
expectRunE func(*cobra.Command, []string) error
}{
{
name: "create exchange",
use: createCommand,
expectUse: expectUse + " " + exchangeServiceCommandCreateUseSuffix,
expectShort: exchangeCreateCmd().Short,
flags: []string{
flags.UserFN,
flags.CategoryDataFN,
flags.DisableIncrementalsFN,
@@ -50,28 +50,29 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
flags.FetchParallelismFN,
flags.SkipReduceFN,
flags.NoStatsFN,
flags.DeltaPageSizeFN,
},
expectRunE: createExchangeCmd,
},
{
name: "list exchange",
use: listCommand,
expectUse: expectUse,
expectShort: exchangeListCmd().Short,
flags: []string{
flags.BackupFN,
flags.FailedItemsFN,
flags.SkippedItemsFN,
flags.RecoveredErrorsFN,
},
expectRunE: listExchangeCmd,
},
{
name: "details exchange",
use: detailsCommand,
expectUse: expectUse + " " + exchangeServiceCommandDetailsUseSuffix,
expectShort: exchangeDetailsCmd().Short,
flags: []string{
flags.BackupFN,
flags.ContactFN,
flags.ContactFolderFN,
@@ -90,7 +91,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
flags.EventStartsBeforeFN,
flags.EventSubjectFN,
},
expectRunE: detailsExchangeCmd,
},
{
"delete exchange",

View File

@@ -13,6 +13,7 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/storage"
"github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -47,7 +48,12 @@ func prepM365Test(
vpr, cfgFP := tconfig.MakeTempTestConfigClone(t, force)
ctx = config.SetViper(ctx, vpr)
repo, err := repository.Initialize(
ctx,
acct,
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))
return acct, st, repo, vpr, recorder, cfgFP

View File

@@ -157,7 +157,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
sel := oneDriveBackupCreateSelectors(flags.UserFV)
ins, err := utils.UsersMap(ctx, *acct, utils.Control(), fault.New(true))
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 users"))
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/alcionai/corso/src/cli/backup"
"github.com/alcionai/corso/src/cli/config"
"github.com/alcionai/corso/src/cli/export"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/cli/help"
"github.com/alcionai/corso/src/cli/print"
@@ -53,7 +54,7 @@ func preRun(cc *cobra.Command, args []string) error {
}
avoidTheseCommands := []string{
"corso", "env", "help", "backup", "details", "list", "restore", "export", "delete", "repo", "init", "connect",
}
if len(logger.ResolvedLogFile) > 0 && !slices.Contains(avoidTheseCommands, cc.Use) {
@@ -150,6 +151,7 @@ func BuildCommandTree(cmd *cobra.Command) {
repo.AddCommands(cmd)
backup.AddCommands(cmd)
restore.AddCommands(cmd)
export.AddCommands(cmd)
help.AddCommands(cmd)
}

src/cli/export/export.go (new file, 108 lines)
View File

@@ -0,0 +1,108 @@
package export
import (
"context"
"errors"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/repo"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/selectors"
)
var exportCommands = []func(cmd *cobra.Command) *cobra.Command{
addOneDriveCommands,
addSharePointCommands,
}
// AddCommands attaches all `corso export * *` commands to the parent.
func AddCommands(cmd *cobra.Command) {
exportC := exportCmd()
cmd.AddCommand(exportC)
for _, addExportTo := range exportCommands {
addExportTo(exportC)
}
}
const exportCommand = "export"
// The export category of commands.
// `corso export [<subcommand>] [<flag>...]`
func exportCmd() *cobra.Command {
return &cobra.Command{
Use: exportCommand,
Short: "Export your service data",
Long: `Export the data stored in one of your M365 services.`,
RunE: handleExportCmd,
Args: cobra.NoArgs,
}
}
// Handler for flat calls to `corso export`.
// Produces the same output as `corso export --help`.
func handleExportCmd(cmd *cobra.Command, args []string) error {
return cmd.Help()
}
func runExport(
ctx context.Context,
cmd *cobra.Command,
args []string,
ueco utils.ExportCfgOpts,
sel selectors.Selector,
backupID, serviceName string,
) error {
r, _, _, _, err := utils.GetAccountAndConnect(ctx, sel.PathService(), repo.S3Overrides(cmd))
if err != nil {
return Only(ctx, err)
}
defer utils.CloseRepo(ctx, r)
exportLocation := args[0]
if len(exportLocation) == 0 {
// This should not be possible, but adding it just in case.
exportLocation = control.DefaultRestoreLocation + dttm.FormatNow(dttm.HumanReadableDriveItem)
}
Infof(ctx, "Exporting to folder %s", exportLocation)
eo, err := r.NewExport(
ctx,
backupID,
sel,
utils.MakeExportConfig(ctx, ueco))
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to initialize "+serviceName+" export"))
}
expColl, err := eo.Run(ctx)
if err != nil {
if errors.Is(err, data.ErrNotFound) {
return Only(ctx, clues.New("Backup or backup details missing for id "+backupID))
}
return Only(ctx, clues.Wrap(err, "Failed to run "+serviceName+" export"))
}
// It would be better to show a progress bar than a spinner, but we
// don't currently have any way of knowing how many files are available.
diskWriteComplete := observe.MessageWithCompletion(ctx, "Writing data to disk")
defer close(diskWriteComplete)
err = export.ConsumeExportCollections(ctx, exportLocation, expColl, eo.Errors)
if err != nil {
return Only(ctx, err)
}
return nil
}

View File

@@ -0,0 +1,96 @@
package export
import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/cli/utils"
)
// called by export.go to map subcommands to provider-specific handling.
func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
var (
c *cobra.Command
fs *pflag.FlagSet
)
switch cmd.Use {
case exportCommand:
c, fs = utils.AddCommand(cmd, oneDriveExportCmd())
c.Use = c.Use + " " + oneDriveServiceCommandUseSuffix
// Flags addition ordering should follow the order we want them to appear in help and docs:
// More generic (ex: --user) and more frequently used flags take precedence.
fs.SortFlags = false
flags.AddBackupIDFlag(c, true)
flags.AddOneDriveDetailsAndRestoreFlags(c)
flags.AddExportConfigFlags(c)
flags.AddFailFastFlag(c)
flags.AddCorsoPassphaseFlags(c)
flags.AddAWSCredsFlags(c)
}
return c
}
const (
oneDriveServiceCommand = "onedrive"
oneDriveServiceCommandUseSuffix = "--backup <backupId> <destination>"
//nolint:lll
oneDriveServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's last backup (1234abcd...) to my-exports directory
corso export onedrive my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef
# Export files named "FY2021 Planning.xlsx" in "Documents/Finance Reports" to current directory
corso export onedrive . --backup 1234abcd-12ab-cd34-56de-1234abcd \
--file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports"
# Export all files and folders in folder "Documents/Finance Reports" that were created before 2020 to my-exports
corso export onedrive my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd \
--folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00`
)
// `corso export onedrive [<flag>...] <destination>`
func oneDriveExportCmd() *cobra.Command {
return &cobra.Command{
Use: oneDriveServiceCommand,
Short: "Export M365 OneDrive service data",
RunE: exportOneDriveCmd,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("missing export destination")
}
return nil
},
Example: oneDriveServiceCommandExportExamples,
}
}
// processes a OneDrive service export.
func exportOneDriveCmd(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
if utils.HasNoFlagsAndShownHelp(cmd) {
return nil
}
opts := utils.MakeOneDriveOpts(cmd)
if flags.RunModeFV == flags.RunModeFlagTest {
return nil
}
if err := utils.ValidateOneDriveRestoreFlags(flags.BackupIDFV, opts); err != nil {
return err
}
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
return runExport(ctx, cmd, args, opts.ExportCfg, sel.Selector, flags.BackupIDFV, "OneDrive")
}

View File

@@ -0,0 +1,106 @@
package export
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/cli/utils/testdata"
"github.com/alcionai/corso/src/internal/tester"
)
type OneDriveUnitSuite struct {
tester.Suite
}
func TestOneDriveUnitSuite(t *testing.T) {
suite.Run(t, &OneDriveUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
expectUse := oneDriveServiceCommand + " " + oneDriveServiceCommandUseSuffix
table := []struct {
name string
use string
expectUse string
expectShort string
expectRunE func(*cobra.Command, []string) error
}{
{"export onedrive", exportCommand, expectUse, oneDriveExportCmd().Short, exportOneDriveCmd},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
cmd := &cobra.Command{Use: test.use}
// normally a persistent flag from the root.
// required to ensure a dry run.
flags.AddRunModeFlag(cmd, true)
c := addOneDriveCommands(cmd)
require.NotNil(t, c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
cmd.SetArgs([]string{
"onedrive",
testdata.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, testdata.BackupInput,
"--" + flags.FileFN, testdata.FlgInputs(testdata.FileNameInput),
"--" + flags.FolderFN, testdata.FlgInputs(testdata.FolderPathInput),
"--" + flags.FileCreatedAfterFN, testdata.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, testdata.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, testdata.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, testdata.FileModifiedBeforeInput,
"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
"--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken,
"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
// bool flags
"--" + flags.ArchiveFN,
})
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
opts := utils.MakeOneDriveOpts(cmd)
assert.Equal(t, testdata.BackupInput, flags.BackupIDFV)
assert.ElementsMatch(t, testdata.FileNameInput, opts.FileName)
assert.ElementsMatch(t, testdata.FolderPathInput, opts.FolderPath)
assert.Equal(t, testdata.FileCreatedAfterInput, opts.FileCreatedAfter)
assert.Equal(t, testdata.FileCreatedBeforeInput, opts.FileCreatedBefore)
assert.Equal(t, testdata.FileModifiedAfterInput, opts.FileModifiedAfter)
assert.Equal(t, testdata.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.Equal(t, testdata.Archive, opts.ExportCfg.Archive)
assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV)
assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
})
}
}

View File

@@ -0,0 +1,100 @@
package export
import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/cli/utils"
)
// called by export.go to map subcommands to provider-specific handling.
func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
var (
c *cobra.Command
fs *pflag.FlagSet
)
switch cmd.Use {
case exportCommand:
c, fs = utils.AddCommand(cmd, sharePointExportCmd())
c.Use = c.Use + " " + sharePointServiceCommandUseSuffix
// Flags addition ordering should follow the order we want them to appear in help and docs:
// More generic (ex: --user) and more frequently used flags take precedence.
fs.SortFlags = false
flags.AddBackupIDFlag(c, true)
flags.AddSharePointDetailsAndRestoreFlags(c)
flags.AddExportConfigFlags(c)
flags.AddFailFastFlag(c)
flags.AddCorsoPassphaseFlags(c)
flags.AddAWSCredsFlags(c)
}
return c
}
const (
sharePointServiceCommand = "sharepoint"
sharePointServiceCommandUseSuffix = "--backup <backupId> <destination>"
//nolint:lll
sharePointServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's latest backup (1234abcd...) to my-exports directory
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef my-exports
# Export files named "ServerRenderTemplate.xsl" in the folder "Display Templates/Style Sheets" as an archive to the current directory
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \
--file "ServerRenderTemplate.xsl" --folder "Display Templates/Style Sheets" --archive .
# Export all files in the folder "Display Templates/Style Sheets" that were created before 2020 to my-exports directory.
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \
--file-created-before 2020-01-01T00:00:00 --folder "Display Templates/Style Sheets" my-exports
# Export all files in the "Documents" library to current directory.
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \
--library Documents --folder "Display Templates/Style Sheets" .`
)
// `corso export sharepoint [<flag>...] <destination>`
func sharePointExportCmd() *cobra.Command {
return &cobra.Command{
Use: sharePointServiceCommand,
Short: "Export M365 SharePoint service data",
RunE: exportSharePointCmd,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("missing export destination")
}
return nil
},
Example: sharePointServiceCommandExportExamples,
}
}
// processes a SharePoint service export.
func exportSharePointCmd(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
if utils.HasNoFlagsAndShownHelp(cmd) {
return nil
}
opts := utils.MakeSharePointOpts(cmd)
if flags.RunModeFV == flags.RunModeFlagTest {
return nil
}
if err := utils.ValidateSharePointRestoreFlags(flags.BackupIDFV, opts); err != nil {
return err
}
sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
return runExport(ctx, cmd, args, opts.ExportCfg, sel.Selector, flags.BackupIDFV, "SharePoint")
}

View File

@@ -0,0 +1,118 @@
package export
import (
"bytes"
"testing"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/cli/utils/testdata"
"github.com/alcionai/corso/src/internal/tester"
)
type SharePointUnitSuite struct {
tester.Suite
}
func TestSharePointUnitSuite(t *testing.T) {
suite.Run(t, &SharePointUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
expectUse := sharePointServiceCommand + " " + sharePointServiceCommandUseSuffix
table := []struct {
name string
use string
expectUse string
expectShort string
expectRunE func(*cobra.Command, []string) error
}{
{"export sharepoint", exportCommand, expectUse, sharePointExportCmd().Short, exportSharePointCmd},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
cmd := &cobra.Command{Use: test.use}
// normally a persistent flag from the root.
// required to ensure a dry run.
flags.AddRunModeFlag(cmd, true)
c := addSharePointCommands(cmd)
require.NotNil(t, c)
cmds := cmd.Commands()
require.Len(t, cmds, 1)
child := cmds[0]
assert.Equal(t, test.expectUse, child.Use)
assert.Equal(t, test.expectShort, child.Short)
tester.AreSameFunc(t, test.expectRunE, child.RunE)
cmd.SetArgs([]string{
"sharepoint",
testdata.RestoreDestination,
"--" + flags.RunModeFN, flags.RunModeFlagTest,
"--" + flags.BackupFN, testdata.BackupInput,
"--" + flags.LibraryFN, testdata.LibraryInput,
"--" + flags.FileFN, testdata.FlgInputs(testdata.FileNameInput),
"--" + flags.FolderFN, testdata.FlgInputs(testdata.FolderPathInput),
"--" + flags.FileCreatedAfterFN, testdata.FileCreatedAfterInput,
"--" + flags.FileCreatedBeforeFN, testdata.FileCreatedBeforeInput,
"--" + flags.FileModifiedAfterFN, testdata.FileModifiedAfterInput,
"--" + flags.FileModifiedBeforeFN, testdata.FileModifiedBeforeInput,
"--" + flags.ListItemFN, testdata.FlgInputs(testdata.ListItemInput),
"--" + flags.ListFolderFN, testdata.FlgInputs(testdata.ListFolderInput),
"--" + flags.PageFN, testdata.FlgInputs(testdata.PageInput),
"--" + flags.PageFolderFN, testdata.FlgInputs(testdata.PageFolderInput),
"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
"--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken,
"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
// bool flags
"--" + flags.ArchiveFN,
})
cmd.SetOut(new(bytes.Buffer)) // drop output
cmd.SetErr(new(bytes.Buffer)) // drop output
err := cmd.Execute()
assert.NoError(t, err, clues.ToCore(err))
opts := utils.MakeSharePointOpts(cmd)
assert.Equal(t, testdata.BackupInput, flags.BackupIDFV)
assert.Equal(t, testdata.LibraryInput, opts.Library)
assert.ElementsMatch(t, testdata.FileNameInput, opts.FileName)
assert.ElementsMatch(t, testdata.FolderPathInput, opts.FolderPath)
assert.Equal(t, testdata.FileCreatedAfterInput, opts.FileCreatedAfter)
assert.Equal(t, testdata.FileCreatedBeforeInput, opts.FileCreatedBefore)
assert.Equal(t, testdata.FileModifiedAfterInput, opts.FileModifiedAfter)
assert.Equal(t, testdata.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.ElementsMatch(t, testdata.ListItemInput, opts.ListItem)
assert.ElementsMatch(t, testdata.ListFolderInput, opts.ListFolder)
assert.ElementsMatch(t, testdata.PageInput, opts.Page)
assert.ElementsMatch(t, testdata.PageFolderInput, opts.PageFolder)
assert.Equal(t, testdata.Archive, opts.ExportCfg.Archive)
assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV)
assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
})
}
}

src/cli/flags/export.go (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
package flags
import (
"github.com/spf13/cobra"
)
const ArchiveFN = "archive"
var ArchiveFV bool
// AddExportConfigFlags adds the export config flag set.
func AddExportConfigFlags(cmd *cobra.Command) {
fs := cmd.Flags()
fs.BoolVar(&ArchiveFV, ArchiveFN, false, "Export data as an archive instead of individual files")
}
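A sketch of how the flag surfaces on the command line (the destination directory and backup ID are hypothetical):

# default behavior: export as individual files
corso export onedrive ./my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd
# with --archive: export everything as a single zip file
corso export onedrive ./my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd --archive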

View File

@@ -5,6 +5,7 @@ import (
)
const (
DeltaPageSizeFN = "delta-page-size"
DisableConcurrencyLimiterFN = "disable-concurrency-limiter"
DisableDeltaFN = "disable-delta"
DisableIncrementalsFN = "disable-incrementals"
@@ -21,6 +22,7 @@ const (
)
var (
DeltaPageSizeFV int
DisableConcurrencyLimiterFV bool
DisableDeltaFV bool
DisableIncrementalsFV bool
@@ -72,6 +74,18 @@ func AddSkipReduceFlag(cmd *cobra.Command) {
cobra.CheckErr(fs.MarkHidden(SkipReduceFN))
}
// AddDeltaPageSizeFlag adds a hidden flag that allows callers to reduce delta
// query page sizes below 500.
func AddDeltaPageSizeFlag(cmd *cobra.Command) {
fs := cmd.Flags()
fs.IntVar(
&DeltaPageSizeFV,
DeltaPageSizeFN,
500,
"Control quantity of items returned in paged queries. Valid range is [1-500]. Default: 500")
cobra.CheckErr(fs.MarkHidden(DeltaPageSizeFN))
}
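Since the flag is hidden it won't appear in help output. A hypothetical invocation might look like the following (the user address is illustrative; per utils.Control below, out-of-range values fall back to 500):

corso backup create exchange --user adele@contoso.onmicrosoft.com --delta-page-size 200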
// AddFetchParallelismFlag adds a hidden flag that allows callers to reduce call
// parallelism (ie, the corso worker pool size) from 4 to as low as 1.
func AddFetchParallelismFlag(cmd *cobra.Command) {

View File

@@ -9,11 +9,13 @@ import (
const (
CollisionsFN = "collisions"
DestinationFN = "destination"
ToResourceFN = "to-resource"
)
var (
CollisionsFV string
DestinationFV string
ToResourceFV string
)
// AddRestoreConfigFlags adds the restore config flag set.
@@ -25,5 +27,8 @@ func AddRestoreConfigFlags(cmd *cobra.Command) {
"Sets the behavior for existing item collisions: "+string(control.Skip)+", "+string(control.Copy)+", or "+string(control.Replace))
fs.StringVar(
&DestinationFV, DestinationFN, "",
"Overrides the folder where items get restored; '/' places items into their original location")
fs.StringVar(
&ToResourceFV, ToResourceFN, "",
"Overrides the protected resource (mailbox, site, user, etc) where data gets restored")
}
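Combining the three restore-config flags, a hypothetical restore could look like (the backup ID and mailbox are illustrative):

corso restore exchange --backup 1234abcd-12ab-cd34-56de-1234abcd \
--collisions copy \
--destination / \
--to-resource adele@contoso.onmicrosoft.com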

View File

@@ -15,6 +15,7 @@ import (
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/events"
"github.com/alcionai/corso/src/pkg/account"
rep "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/credentials"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/storage"
@@ -158,7 +159,13 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
}
// TODO(ashmrtn): Wire to flags for retention during repo init.
r, err := repository.Initialize(
ctx,
cfg.Account,
cfg.Storage,
opt,
rep.Retention{})
if err != nil {
if succeedIfExists && errors.Is(err, repository.ErrorRepoAlreadyExists) {
return nil

View File

@@ -16,6 +16,7 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/repository"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
)
@@ -200,7 +201,12 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
ctx = config.SetViper(ctx, vpr)
// init the repo first
_, err = repository.Initialize(
ctx,
account.Account{},
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))
// then test it

View File

@@ -20,6 +20,7 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/selectors"
@@ -83,7 +84,12 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
)
// init the repo first
suite.repo, err = repository.Initialize(
ctx,
suite.acct,
suite.st,
control.Options{},
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))
suite.backupOps = make(map[path.CategoryType]operations.BackupOperation)

View File

@@ -84,6 +84,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
"--" + flags.CollisionsFN, testdata.Collisions,
"--" + flags.DestinationFN, testdata.Destination,
"--" + flags.ToResourceFN, testdata.ToResource,
"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
@@ -125,6 +126,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource)
assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)

View File

@@ -70,6 +70,7 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
"--" + flags.CollisionsFN, testdata.Collisions,
"--" + flags.DestinationFN, testdata.Destination,
"--" + flags.ToResourceFN, testdata.ToResource,
"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
@@ -80,6 +81,9 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
"--" + flags.AzureClientSecretFN, testdata.AzureClientSecret,
"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
// bool flags
"--" + flags.RestorePermissionsFN,
})
cmd.SetOut(new(bytes.Buffer)) // drop output
@@ -99,6 +103,7 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource)
assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
@@ -109,6 +114,7 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
assert.Equal(t, testdata.AzureClientSecret, flags.AzureClientSecretFV)
assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
assert.True(t, flags.RestorePermissionsFV)
})
}
}

View File

@@ -34,7 +34,7 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
expectShort string
expectRunE func(*cobra.Command, []string) error
}{
{"restore sharepoint", restoreCommand, expectUse, sharePointRestoreCmd().Short, restoreSharePointCmd},
}
for _, test := range table {
suite.Run(test.name, func() {
@@ -75,6 +75,7 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
"--" + flags.CollisionsFN, testdata.Collisions,
"--" + flags.DestinationFN, testdata.Destination,
"--" + flags.ToResourceFN, testdata.ToResource,
"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
@@ -85,6 +86,9 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
"--" + flags.AzureClientSecretFN, testdata.AzureClientSecret,
"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
// bool flags
"--" + flags.RestorePermissionsFN,
})
cmd.SetOut(new(bytes.Buffer)) // drop output
@@ -111,6 +115,7 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource)
assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
@@ -121,6 +126,9 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
assert.Equal(t, testdata.AzureClientSecret, flags.AzureClientSecretFV)
assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
// bool flags
assert.True(t, flags.RestorePermissionsFV)
})
}
}

View File

@@ -0,0 +1,38 @@
package utils
import (
"context"
"github.com/spf13/cobra"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/pkg/control"
)
type ExportCfgOpts struct {
Archive bool
Populated flags.PopulatedFlags
}
func makeExportCfgOpts(cmd *cobra.Command) ExportCfgOpts {
return ExportCfgOpts{
Archive: flags.ArchiveFV,
// populated contains the list of flags that appear in the
// command, according to pflags. Use this to differentiate
// between an "empty" and a "missing" value.
Populated: flags.GetPopulatedFlags(cmd),
}
}
func MakeExportConfig(
ctx context.Context,
opts ExportCfgOpts,
) control.ExportConfig {
exportCfg := control.DefaultExportConfig()
exportCfg.Archive = opts.Archive
return exportCfg
}

View File

@@ -0,0 +1,54 @@
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control"
)
type ExportCfgUnitSuite struct {
tester.Suite
}
func TestExportCfgUnitSuite(t *testing.T) {
suite.Run(t, &ExportCfgUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *ExportCfgUnitSuite) TestMakeExportConfig() {
rco := &ExportCfgOpts{Archive: true}
table := []struct {
name string
populated flags.PopulatedFlags
expect control.ExportConfig
}{
{
name: "archive populated",
populated: flags.PopulatedFlags{
flags.ArchiveFN: {},
},
expect: control.ExportConfig{
Archive: true,
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
opts := *rco
opts.Populated = test.populated
result := MakeExportConfig(ctx, opts)
assert.Equal(t, test.expect.Archive, result.Archive)
})
}
}

View File

@@ -19,6 +19,7 @@ type OneDriveOpts struct {
FileModifiedBefore string
RestoreCfg RestoreCfgOpts
ExportCfg ExportCfgOpts
Populated flags.PopulatedFlags
}
@@ -35,6 +36,7 @@ func MakeOneDriveOpts(cmd *cobra.Command) OneDriveOpts {
FileModifiedBefore: flags.FileModifiedBeforeFV,
RestoreCfg: makeRestoreCfgOpts(cmd),
ExportCfg: makeExportCfgOpts(cmd),
// populated contains the list of flags that appear in the
// command, according to pflags. Use this to differentiate

View File

@ -8,14 +8,19 @@ import (
// Control produces the control options based on the user's flags.
func Control() control.Options {
opt := control.DefaultOptions()
if flags.FailFastFV {
opt.FailureHandling = control.FailFast
}
dps := int32(flags.DeltaPageSizeFV)
if dps > 500 || dps < 1 {
dps = 500
}
opt.DeltaPageSize = dps
opt.DisableMetrics = flags.NoStatsFV
opt.SkipReduce = flags.SkipReduceFV
opt.ToggleFeatures.DisableIncrementals = flags.DisableIncrementalsFV
opt.ToggleFeatures.DisableDelta = flags.DisableDeltaFV
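The clamp above silently coerces an out-of-range delta page size back to 500 rather than returning an error. A standalone, runnable sketch of that behavior (clampDeltaPageSize is a hypothetical helper written for illustration, not part of this change):

package main

import "fmt"

// clampDeltaPageSize mirrors the bounds check in Control():
// any value outside [1, 500] falls back to 500.
func clampDeltaPageSize(v int32) int32 {
	if v > 500 || v < 1 {
		return 500
	}
	return v
}

func main() {
	fmt.Println(clampDeltaPageSize(499)) // 499: in range, kept as-is
	fmt.Println(clampDeltaPageSize(0))   // 500: too small
	fmt.Println(clampDeltaPageSize(999)) // 500: too large
}

Note that the unit test in the next file passes 499 on the command line and expects it back unchanged, exercising the in-range path.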


@ -35,6 +35,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
assert.True(t, flags.SkipReduceFV, flags.SkipReduceFN)
assert.Equal(t, 2, flags.FetchParallelismFV, flags.FetchParallelismFN)
assert.True(t, flags.DisableConcurrencyLimiterFV, flags.DisableConcurrencyLimiterFN)
assert.Equal(t, 499, flags.DeltaPageSizeFV, flags.DeltaPageSizeFN)
},
}
@ -48,6 +49,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
flags.AddSkipReduceFlag(cmd)
flags.AddFetchParallelismFlag(cmd)
flags.AddDisableConcurrencyLimiterFlag(cmd)
flags.AddDeltaPageSizeFlag(cmd)
// Test arg parsing for few args
cmd.SetArgs([]string{
@ -60,6 +62,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
"--" + flags.SkipReduceFN,
"--" + flags.FetchParallelismFN, "2",
"--" + flags.DisableConcurrencyLimiterFN,
"--" + flags.DeltaPageSizeFN, "499",
})
err := cmd.Execute()


@ -18,16 +18,20 @@ type RestoreCfgOpts struct {
// DTTMFormat is the timestamp format appended
// to the default folder name. Defaults to
// dttm.HumanReadable.
DTTMFormat dttm.TimeFormat
ProtectedResource string
RestorePermissions bool
Populated flags.PopulatedFlags
}
func makeRestoreCfgOpts(cmd *cobra.Command) RestoreCfgOpts {
return RestoreCfgOpts{
Collisions: flags.CollisionsFV,
Destination: flags.DestinationFV,
DTTMFormat: dttm.HumanReadable,
ProtectedResource: flags.ToResourceFV,
RestorePermissions: flags.RestorePermissionsFV,
// populated contains the list of flags that appear in the
// command, according to pflags. Use this to differentiate
@ -67,6 +71,9 @@ func MakeRestoreConfig(
restoreCfg.Location = opts.Destination
}
restoreCfg.ProtectedResource = opts.ProtectedResource
restoreCfg.IncludePermissions = opts.RestorePermissions
Infof(ctx, "Restoring to folder %s", restoreCfg.Location)
return restoreCfg
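End to end, the flow is: pflag values land on RestoreCfgOpts via makeRestoreCfgOpts, and MakeRestoreConfig copies them onto control.RestoreConfig. A hedged sketch of that wiring from a caller's perspective; the surrounding handler is an assumption, only the functions named here appear in this change:

// Sketch: inside a restore command handler.
opts := MakeOneDriveOpts(cmd) // includes RestoreCfg: makeRestoreCfgOpts(cmd)
restoreCfg := MakeRestoreConfig(ctx, opts.RestoreCfg)

// e.g. with a collisions value of "replace" and restore-permissions set:
//   restoreCfg.OnCollision == control.CollisionPolicy("replace")
//   restoreCfg.IncludePermissions == true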


@ -68,18 +68,18 @@ func (suite *RestoreCfgUnitSuite) TestValidateRestoreConfigFlags() {
}
func (suite *RestoreCfgUnitSuite) TestMakeRestoreConfig() {
table := []struct {
name string
rco *RestoreCfgOpts
populated flags.PopulatedFlags
expect control.RestoreConfig
}{
{
name: "not populated",
rco: &RestoreCfgOpts{
Collisions: "collisions",
Destination: "destination",
},
populated: flags.PopulatedFlags{},
expect: control.RestoreConfig{
OnCollision: control.Skip,
@ -88,6 +88,10 @@ func (suite *RestoreCfgUnitSuite) TestMakeRestoreConfig() {
},
{
name: "collision populated",
rco: &RestoreCfgOpts{
Collisions: "collisions",
Destination: "destination",
},
populated: flags.PopulatedFlags{
flags.CollisionsFN: {},
},
@ -98,6 +102,10 @@ func (suite *RestoreCfgUnitSuite) TestMakeRestoreConfig() {
},
{
name: "destination populated",
rco: &RestoreCfgOpts{
Collisions: "collisions",
Destination: "destination",
},
populated: flags.PopulatedFlags{
flags.DestinationFN: {},
},
@ -108,6 +116,10 @@ func (suite *RestoreCfgUnitSuite) TestMakeRestoreConfig() {
},
{
name: "both populated",
rco: &RestoreCfgOpts{
Collisions: "collisions",
Destination: "destination",
},
populated: flags.PopulatedFlags{
flags.CollisionsFN: {},
flags.DestinationFN: {},
@ -117,6 +129,23 @@ func (suite *RestoreCfgUnitSuite) TestMakeRestoreConfig() {
Location: "destination",
},
},
{
name: "with restore permissions",
rco: &RestoreCfgOpts{
Collisions: "collisions",
Destination: "destination",
RestorePermissions: true,
},
populated: flags.PopulatedFlags{
flags.CollisionsFN: {},
flags.DestinationFN: {},
},
expect: control.RestoreConfig{
OnCollision: control.CollisionPolicy("collisions"),
Location: "destination",
IncludePermissions: true,
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
@ -125,12 +154,13 @@ func (suite *RestoreCfgUnitSuite) TestMakeRestoreConfig() {
ctx, flush := tester.NewContext(t)
defer flush()
opts := *test.rco
opts.Populated = test.populated
result := MakeRestoreConfig(ctx, opts)
assert.Equal(t, test.expect.OnCollision, result.OnCollision)
assert.Contains(t, result.Location, test.expect.Location)
assert.Equal(t, test.expect.IncludePermissions, result.IncludePermissions)
})
}
}


@ -32,6 +32,7 @@ type SharePointOpts struct {
Page []string
RestoreCfg RestoreCfgOpts
ExportCfg ExportCfgOpts
Populated flags.PopulatedFlags
}
@ -56,6 +57,7 @@ func MakeSharePointOpts(cmd *cobra.Command) SharePointOpts {
PageFolder: flags.PageFolderFV,
RestoreCfg: makeRestoreCfgOpts(cmd),
ExportCfg: makeExportCfgOpts(cmd),
// populated contains the list of flags that appear in the
// command, according to pflags. Use this to differentiate


@ -46,8 +46,13 @@ var (
Collisions = "collisions"
Destination = "destination"
ToResource = "toResource"
RestorePermissions = true
DeltaPageSize = "deltaPageSize"
Archive = true
AzureClientID = "testAzureClientId"
AzureTenantID = "testAzureTenantId"
AzureClientSecret = "testAzureClientSecret"
@ -57,4 +62,6 @@ var (
AWSSessionToken = "testAWSSessionToken"
CorsoPassphrase = "testCorsoPassphrase"
RestoreDestination = "test-restore-destination"
)


@ -7,6 +7,7 @@ import (
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
@ -15,9 +16,10 @@ import (
func UsersMap(
ctx context.Context,
acct account.Account,
co control.Options,
errs *fault.Bus,
) (idname.Cacher, error) {
au, err := makeUserAPI(acct, co)
if err != nil {
return nil, clues.Wrap(err, "constructing a graph client")
}
@ -25,13 +27,13 @@ func UsersMap(
return au.GetAllIDsAndNames(ctx, errs)
}
func makeUserAPI(acct account.Account, co control.Options) (api.Users, error) {
creds, err := acct.M365Config()
if err != nil {
return api.Users{}, clues.Wrap(err, "getting m365 account creds")
}
cli, err := api.NewClient(creds, co)
if err != nil {
return api.Users{}, clues.Wrap(err, "constructing api client")
}
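UsersMap now threads control.Options down to the underlying api.Client. A hedged sketch of a caller under the new signature, assuming acct is an already-configured account.Account; error handling abbreviated:

// Sketch only: pass options through to the graph client.
opts := control.DefaultOptions()
users, err := UsersMap(ctx, acct, opts, fault.New(true))
if err != nil {
	return clues.Wrap(err, "mapping user ids and names")
}
// users is an idname.Cacher: resolve IDs to names and vice versa.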


@ -21,12 +21,12 @@ import (
odStub "github.com/alcionai/corso/src/internal/m365/onedrive/stub"
"github.com/alcionai/corso/src/internal/m365/resource"
m365Stub "github.com/alcionai/corso/src/internal/m365/stub"
"github.com/alcionai/corso/src/internal/operations/inject"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/credentials"
"github.com/alcionai/corso/src/pkg/fault"
@ -104,7 +104,15 @@ func generateAndRestoreItems(
print.Infof(ctx, "Generating %d %s items in %s\n", howMany, cat, Destination)
rcc := inject.RestoreConsumerConfig{
BackupVersion: version.Backup,
Options: opts,
ProtectedResource: sel,
RestoreConfig: restoreCfg,
Selector: sel,
}
return ctrl.ConsumeRestoreCollections(ctx, rcc, dataColls, errs, ctr)
}
// ------------------------------------------------------------------------------------------
@ -144,7 +152,7 @@ func getControllerAndVerifyResourceOwner(
return nil, account.Account{}, nil, clues.Wrap(err, "connecting to graph api")
}
id, _, err := ctrl.PopulateProtectedResourceIDAndName(ctx, resourceOwner, nil)
if err != nil {
return nil, account.Account{}, nil, clues.Wrap(err, "verifying user")
}
@ -216,7 +224,8 @@ var (
func generateAndRestoreDriveItems(
ctrl *m365.Controller,
protectedResource idname.Provider,
secondaryUserID, secondaryUserName string,
acct account.Account,
service path.ServiceType,
cat path.CategoryType,
@ -240,14 +249,23 @@ func generateAndRestoreDriveItems(
switch service {
case path.SharePointService:
d, err := ctrl.AC.Stable.
Client().
Sites().
BySiteId(protectedResource.ID()).
Drive().
Get(ctx, nil)
if err != nil {
return nil, clues.Wrap(err, "getting site's default drive")
}
driveID = ptr.Val(d.GetId())
default:
d, err := ctrl.AC.Stable.Client().
Users().
ByUserId(protectedResource.ID()).
Drive().
Get(ctx, nil)
if err != nil {
return nil, clues.Wrap(err, "getting user's default drive")
}
@ -407,18 +425,16 @@ func generateAndRestoreDriveItems(
// input,
// version.Backup)
opts := control.DefaultOptions()
restoreCfg.IncludePermissions = true
config := m365Stub.ConfigInfo{
Opts: opts,
Resource: resource.Users,
Service: service,
Tenant: tenantID,
ResourceOwners: []string{protectedResource.ID()},
RestoreCfg: restoreCfg,
}
_, _, collections, _, err := m365Stub.GetCollectionsAndExpected(
@ -429,5 +445,13 @@ func generateAndRestoreDriveItems(
return nil, err
}
rcc := inject.RestoreConsumerConfig{
BackupVersion: version.Backup,
Options: opts,
ProtectedResource: protectedResource,
RestoreConfig: restoreCfg,
Selector: sel,
}
return ctrl.ConsumeRestoreCollections(ctx, rcc, collections, errs, ctr)
}


@ -72,7 +72,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
subject, body, body,
now, now, now, now)
},
control.DefaultOptions(),
errs,
count.New())
if err != nil {
@ -121,7 +121,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
exchMock.NoAttachments, exchMock.NoCancelledOccurrences,
exchMock.NoExceptionOccurrences)
},
control.DefaultOptions(),
errs,
count.New())
if err != nil {
@ -172,7 +172,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
"123-456-7890",
)
},
control.DefaultOptions(),
errs,
count.New())
if err != nil {


@ -47,7 +47,7 @@ func handleOneDriveFileFactory(cmd *cobra.Command, args []string) error {
deets, err := generateAndRestoreDriveItems(
ctrl,
inp,
SecondaryUser,
strings.ToLower(SecondaryUser),
acct,


@ -47,7 +47,7 @@ func handleSharePointLibraryFileFactory(cmd *cobra.Command, args []string) error
deets, err := generateAndRestoreDriveItems(
ctrl,
inp,
SecondaryUser,
strings.ToLower(SecondaryUser),
acct,


@ -1,157 +0,0 @@
// get_item.go is a source file designed to retrieve an m365 object from an
// existing M365 account. Data displayed is representative of the current
// serialization abstraction versioning used by Microsoft Graph and stored by Corso.
package exchange
import (
"context"
"fmt"
"os"
"github.com/alcionai/clues"
"github.com/microsoft/kiota-abstractions-go/serialization"
kw "github.com/microsoft/kiota-serialization-json-go"
"github.com/spf13/cobra"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/credentials"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
// Required inputs from user for command execution
var (
user, tenant, m365ID, category string
)
func AddCommands(parent *cobra.Command) {
exCmd := &cobra.Command{
Use: "exchange",
Short: "Get an M365ID item JSON",
RunE: handleExchangeCmd,
}
fs := exCmd.PersistentFlags()
fs.StringVar(&m365ID, "id", "", "m365 identifier for object")
fs.StringVar(&category, "category", "", "type of M365 data (contacts, email, events)")
fs.StringVar(&user, "user", "", "m365 user id of M365 user")
fs.StringVar(&tenant, "tenant", "", "m365 identifier for the tenant")
cobra.CheckErr(exCmd.MarkPersistentFlagRequired("user"))
cobra.CheckErr(exCmd.MarkPersistentFlagRequired("id"))
cobra.CheckErr(exCmd.MarkPersistentFlagRequired("category"))
parent.AddCommand(exCmd)
}
func handleExchangeCmd(cmd *cobra.Command, args []string) error {
if utils.HasNoFlagsAndShownHelp(cmd) {
return nil
}
tid := str.First(tenant, os.Getenv(account.AzureTenantID))
ctx := clues.Add(
cmd.Context(),
"item_id", m365ID,
"resource_owner", user,
"tenant", tid)
creds := account.M365Config{
M365: credentials.GetM365(),
AzureTenantID: tid,
}
err := runDisplayM365JSON(ctx, creds, user, m365ID, fault.New(true))
if err != nil {
cmd.SilenceUsage = true
cmd.SilenceErrors = true
return clues.Wrap(err, "getting item")
}
return nil
}
func runDisplayM365JSON(
ctx context.Context,
creds account.M365Config,
user, itemID string,
errs *fault.Bus,
) error {
var (
bs []byte
err error
cat = path.ToCategoryType(category)
sw = kw.NewJsonSerializationWriter()
)
ac, err := api.NewClient(creds)
if err != nil {
return err
}
switch cat {
case path.EmailCategory:
bs, err = getItem(ctx, ac.Mail(), user, itemID, true, errs)
case path.EventsCategory:
bs, err = getItem(ctx, ac.Events(), user, itemID, true, errs)
case path.ContactsCategory:
bs, err = getItem(ctx, ac.Contacts(), user, itemID, true, errs)
default:
return fmt.Errorf("unable to process category: %s", cat)
}
if err != nil {
return err
}
err = sw.WriteStringValue("", ptr.To(string(bs)))
if err != nil {
return clues.Wrap(err, "Error writing string value: "+itemID)
}
array, err := sw.GetSerializedContent()
if err != nil {
return clues.Wrap(err, "Error serializing item: "+itemID)
}
fmt.Println(string(array))
return nil
}
type itemer interface {
GetItem(
ctx context.Context,
user, itemID string,
immutableID bool,
errs *fault.Bus,
) (serialization.Parsable, *details.ExchangeInfo, error)
Serialize(
ctx context.Context,
item serialization.Parsable,
user, itemID string,
) ([]byte, error)
}
func getItem(
ctx context.Context,
itm itemer,
user, itemID string,
immutableIDs bool,
errs *fault.Bus,
) ([]byte, error) {
sp, _, err := itm.GetItem(ctx, user, itemID, immutableIDs, errs)
if err != nil {
return nil, clues.Wrap(err, "getting item")
}
return itm.Serialize(ctx, sp, user, itemID)
}


@ -1,36 +0,0 @@
package main
import (
"context"
"os"
"github.com/spf13/cobra"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cmd/getM365/exchange"
"github.com/alcionai/corso/src/cmd/getM365/onedrive"
"github.com/alcionai/corso/src/pkg/logger"
)
var rootCmd = &cobra.Command{
Use: "getM365",
}
func main() {
ls := logger.Settings{
Level: logger.LLDebug,
Format: logger.LFText,
}
ctx, _ := logger.CtxOrSeed(context.Background(), ls)
ctx = SetRootCmd(ctx, rootCmd)
defer logger.Flush(ctx)
exchange.AddCommands(rootCmd)
onedrive.AddCommands(rootCmd)
if err := rootCmd.Execute(); err != nil {
Err(ctx, err)
os.Exit(1)
}
}


@ -1,207 +0,0 @@
// get_item.go is a source file designed to retrieve an m365 object from an
// existing M365 account. Data displayed is representative of the current
// serialization abstraction versioning used by Microsoft Graph and stored by Corso.
package onedrive
import (
"context"
"encoding/json"
"io"
"net/http"
"os"
"github.com/alcionai/clues"
"github.com/microsoft/kiota-abstractions-go/serialization"
kjson "github.com/microsoft/kiota-serialization-json-go"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/spf13/cobra"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/credentials"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
const downloadURLKey = "@microsoft.graph.downloadUrl"
// Required inputs from user for command execution
var (
user, tenant, m365ID string
)
func AddCommands(parent *cobra.Command) {
exCmd := &cobra.Command{
Use: "onedrive",
Short: "Get an M365ID item",
RunE: handleOneDriveCmd,
}
fs := exCmd.PersistentFlags()
fs.StringVar(&m365ID, "id", "", "m365 identifier for object")
fs.StringVar(&user, "user", "", "m365 user id of M365 user")
fs.StringVar(&tenant, "tenant", "", "m365 identifier for the tenant")
cobra.CheckErr(exCmd.MarkPersistentFlagRequired("user"))
cobra.CheckErr(exCmd.MarkPersistentFlagRequired("id"))
parent.AddCommand(exCmd)
}
func handleOneDriveCmd(cmd *cobra.Command, args []string) error {
if utils.HasNoFlagsAndShownHelp(cmd) {
return nil
}
tid := str.First(tenant, os.Getenv(account.AzureTenantID))
ctx := clues.Add(
cmd.Context(),
"item_id", m365ID,
"resource_owner", user,
"tenant", tid)
// get account info
creds := account.M365Config{
M365: credentials.GetM365(),
AzureTenantID: tid,
}
gr := graph.NewNoTimeoutHTTPWrapper()
ac, err := api.NewClient(creds)
if err != nil {
return Only(ctx, clues.Wrap(err, "getting api client"))
}
err = runDisplayM365JSON(ctx, ac, gr, creds, user, m365ID)
if err != nil {
cmd.SilenceUsage = true
cmd.SilenceErrors = true
return Only(ctx, clues.Wrap(err, "getting item"))
}
return nil
}
type itemData struct {
Size int `json:"size"`
}
type itemPrintable struct {
Info json.RawMessage `json:"info"`
Permissions json.RawMessage `json:"permissions"`
Data itemData `json:"data"`
}
func (i itemPrintable) MinimumPrintable() any {
return i
}
func runDisplayM365JSON(
ctx context.Context,
ac api.Client,
gr graph.Requester,
creds account.M365Config,
userID, itemID string,
) error {
drive, err := ac.Users().GetDefaultDrive(ctx, userID)
if err != nil {
return err
}
driveID := ptr.Val(drive.GetId())
it := itemPrintable{}
item, err := ac.Drives().GetItem(ctx, driveID, itemID)
if err != nil {
return err
}
if item != nil {
content, err := getDriveItemContent(ctx, gr, item)
if err != nil {
return err
}
// We could get size from item.GetSize(), but the
// getDriveItemContent call is to ensure that we are able to
// download the file.
it.Data.Size = len(content)
}
sInfo, err := serializeObject(item)
if err != nil {
return err
}
err = json.Unmarshal([]byte(sInfo), &it.Info)
if err != nil {
return err
}
perms, err := ac.Drives().GetItemPermission(ctx, driveID, itemID)
if err != nil {
return err
}
sPerms, err := serializeObject(perms)
if err != nil {
return err
}
err = json.Unmarshal([]byte(sPerms), &it.Permissions)
if err != nil {
return err
}
PrettyJSON(ctx, it)
return nil
}
func serializeObject(data serialization.Parsable) (string, error) {
sw := kjson.NewJsonSerializationWriter()
err := sw.WriteObjectValue("", data)
if err != nil {
return "", clues.Wrap(err, "writing serializing info")
}
content, err := sw.GetSerializedContent()
if err != nil {
return "", clues.Wrap(err, "getting serializing info")
}
return string(content), err
}
func getDriveItemContent(
ctx context.Context,
gr graph.Requester,
item models.DriveItemable,
) ([]byte, error) {
url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
if !ok {
return nil, clues.New("retrieving download url")
}
resp, err := gr.Request(ctx, http.MethodGet, *url, nil, nil)
if err != nil {
return nil, clues.New("requesting item content").With("error", err)
}
defer resp.Body.Close()
content, err := io.ReadAll(resp.Body)
if err != nil {
return nil, clues.New("reading item content").With("error", err)
}
return content, nil
}


@ -7,15 +7,122 @@ import (
"strconv"
"time"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/cli/config"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/store"
)
// deleteBackups connects to the repository and deletes all backups for
// service that are at least deletionDays old. Returns the IDs of all backups
// that were deleted.
func deleteBackups(
ctx context.Context,
service path.ServiceType,
deletionDays int,
) ([]string, error) {
ctx = clues.Add(ctx, "cutoff_days", deletionDays)
r, _, _, _, err := utils.GetAccountAndConnect(ctx, service, nil)
if err != nil {
return nil, clues.Wrap(err, "connecting to account").WithClues(ctx)
}
defer r.Close(ctx)
backups, err := r.BackupsByTag(ctx, store.Service(service))
if err != nil {
return nil, clues.Wrap(err, "listing backups").WithClues(ctx)
}
var (
deleted []string
cutoff = time.Now().Add(-time.Hour * 24 * time.Duration(deletionDays))
)
for _, backup := range backups {
if backup.StartAndEndTime.CompletedAt.Before(cutoff) {
if err := r.DeleteBackup(ctx, backup.ID.String()); err != nil {
return nil, clues.Wrap(
err,
"deleting backup").
With("backup_id", backup.ID).
WithClues(ctx)
}
deleted = append(deleted, backup.ID.String())
logAndPrint(ctx, "Deleted backup %s", backup.ID.String())
}
}
return deleted, nil
}
// pitrListBackups connects to the repository at the given point in time and
// lists the backups for service. It then checks the list of backups contains
// the backups in backupIDs.
//
//nolint:unused
//lint:ignore U1000 Waiting for full support.
func pitrListBackups(
ctx context.Context,
service path.ServiceType,
pitr time.Time,
backupIDs []string,
) error {
if len(backupIDs) == 0 {
return nil
}
ctx = clues.Add(ctx, "pitr_time", pitr, "search_backups", backupIDs)
// TODO(ashmrtn): This may be moved into CLI layer at some point when we add
// flags for opening a repo at a point in time.
cfg, err := config.GetConfigRepoDetails(ctx, true, true, nil)
if err != nil {
return clues.Wrap(err, "getting config info")
}
opts := utils.ControlWithConfig(cfg)
opts.Repo.ViewTimestamp = &pitr
r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, cfg.RepoID, opts)
if err != nil {
return clues.Wrap(err, "connecting to repo").WithClues(ctx)
}
defer r.Close(ctx)
backups, err := r.BackupsByTag(ctx, store.Service(service))
if err != nil {
return clues.Wrap(err, "listing backups").WithClues(ctx)
}
bups := map[string]struct{}{}
for _, backup := range backups {
bups[backup.ID.String()] = struct{}{}
}
ctx = clues.Add(ctx, "found_backups", maps.Keys(bups))
for _, backupID := range backupIDs {
if _, ok := bups[backupID]; !ok {
return clues.New("looking for backup").
With("search_backup_id", backupID).
WithClues(ctx)
}
}
return nil
}
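pitrListBackups is parked behind the nolint markers until point-in-time repo opens are fully supported. A hedged sketch of the intended pairing with deleteBackups once that lands; the one-hour lookback and the service choice are illustrative only:

// Hypothetical wiring inside main(), after point-in-time support ships.
deleted, err := deleteBackups(ctx, path.ExchangeService, 7)
if err != nil {
	fatal(ctx, "deleting backups", err)
}

// Re-open the repo as of an hour ago and confirm the just-deleted
// backups are still listed at that point in time.
pitr := time.Now().Add(-time.Hour)
if err := pitrListBackups(ctx, path.ExchangeService, pitr, deleted); err != nil {
	fatal(ctx, "checking point-in-time backup listing", err)
}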
func main() {
var (
service path.ServiceType
@ -39,31 +146,16 @@ func main() {
fatal(cc.Context(), "unknown service", nil)
}
ctx := clues.Add(cc.Context(), "service", service)
days, err := strconv.Atoi(os.Getenv("DELETION_DAYS"))
if err != nil {
fatal(ctx, "invalid number of days provided", nil)
}
_, err = deleteBackups(ctx, service, days)
if err != nil {
fatal(ctx, "deleting backups", clues.Stack(err))
}
}


@ -19,14 +19,17 @@ Param (
[datetime]$PurgeBeforeTimestamp,
[Parameter(Mandatory = $True, HelpMessage = "Purge folders with this prefix")]
[String[]]$FolderPrefixPurgeList,
[Parameter(Mandatory = $False, HelpMessage = "Delete document libraries with this prefix")]
[String[]]$LibraryPrefixDeleteList = @()
)
Set-StrictMode -Version 2.0
# Attempt to set network timeout to 10min
[System.Net.ServicePointManager]::MaxServicePointIdleTime = 600000
function Get-TimestampFromFolderName {
param (
[Parameter(Mandatory = $True, HelpMessage = "Folder ")]
[Microsoft.SharePoint.Client.Folder]$folder
@ -54,6 +57,36 @@ function Get-TimestampFromName {
return $timestamp
}
function Get-TimestampFromListName {
param (
[Parameter(Mandatory = $True, HelpMessage = "List ")]
[Microsoft.SharePoint.Client.List]$list
)
$name = $list.Title
# fall back on the list's last-modified time
[datetime]$timestamp = $list.LastItemUserModifiedDate
try {
# Assumes that the timestamp is at the end and starts with yyyy-mm-ddT and is ISO8601
if ($name -imatch "(\d{4}-\d{2}-\d{2}T.*)") {
$timestamp = [System.Convert]::ToDatetime($Matches.0)
}
# Assumes that the timestamp is at the end and starts with dd-MMM-yyyy_HH-MM-SS
if ($name -imatch "(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2})") {
$timestamp = [datetime]::ParseExact($Matches.0, "dd-MMM-yyyy_HH-mm-ss", [CultureInfo]::InvariantCulture, "AssumeUniversal")
}
}
catch {}
Write-Verbose "List: $name, create timestamp: $timestamp"
return $timestamp
}
function Purge-Library {
[CmdletBinding(SupportsShouldProcess)]
Param (
@ -77,7 +110,7 @@ function Purge-Library {
foreach ($f in $folders) {
$folderName = $f.Name
$createTime = Get-TimestampFromFolderName -Folder $f
if ($PurgeBeforeTimestamp -gt $createTime) {
foreach ($p in $FolderPrefixPurgeList) {
@ -97,7 +130,7 @@ function Purge-Library {
if ($f.ServerRelativeUrl -imatch "$SiteSuffix/{0,1}(.+?)/{0,1}$folderName$") {
$siteRelativeParentPath = $Matches.1
}
if ($PSCmdlet.ShouldProcess("Name: " + $f.Name + " Parent: " + $siteRelativeParentPath, "Remove folder")) {
Write-Host "Deleting folder: "$f.Name" with parent: $siteRelativeParentPath"
try {
@ -110,6 +143,54 @@ function Purge-Library {
}
}
function Delete-LibraryByPrefix {
[CmdletBinding(SupportsShouldProcess)]
Param (
[Parameter(Mandatory = $True, HelpMessage = "Document library root")]
[String]$LibraryNamePrefix,
[Parameter(Mandatory = $True, HelpMessage = "Purge folders before this date time (UTC)")]
[datetime]$PurgeBeforeTimestamp,
[Parameter(Mandatory = $True, HelpMessage = "Site suffix")]
[String[]]$SiteSuffix
)
Write-Host "`nDeleting library: $LibraryNamePrefix"
$listsToDelete = @()
$lists = Get-PnPList
foreach ($l in $lists) {
$listName = $l.Title
$createTime = Get-TimestampFromListName -List $l
if ($PurgeBeforeTimestamp -gt $createTime) {
if ($listName -like "$LibraryNamePrefix*") {
$listsToDelete += $l
}
}
}
Write-Host "Found"$listsToDelete.count"lists to delete"
foreach ($l in $listsToDelete) {
$listName = $l.Title
if ($PSCmdlet.ShouldProcess("Name: " + $l.Title, "Remove list")) {
Write-Host "Deleting list: "$l.Title
try {
Remove-PnPList -Identity $l.Id -Force
}
catch [ System.Management.Automation.ItemNotFoundException ] {
Write-Host "List: "$f.Name" is already deleted. Skipping..."
}
}
}
}
######## MAIN #########
# Setup SharePointPnP
@ -176,4 +257,8 @@ $FolderPrefixPurgeList = $FolderPrefixPurgeList | ForEach-Object { @($_.Split(',
foreach ($library in $LibraryNameList) {
Purge-Library -LibraryName $library -PurgeBeforeTimestamp $PurgeBeforeTimestamp -FolderPrefixPurgeList $FolderPrefixPurgeList -SiteSuffix $siteSuffix
}
foreach ($libraryPfx in $LibraryPrefixDeleteList) {
Delete-LibraryByPrefix -LibraryNamePrefix $libraryPfx -PurgeBeforeTimestamp $PurgeBeforeTimestamp -SiteSuffix $siteSuffix
}


@ -0,0 +1,6 @@
package common
type PermissionInfo struct {
EntityID string
Roles []string
}


@ -0,0 +1,82 @@
package common
import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/pkg/logger"
)
func Assert(
ctx context.Context,
passes func() bool,
header string,
expect, current any,
) {
if passes() {
return
}
header = "Error: " + header
expected := fmt.Sprintf("* Expected: %+v", expect)
got := fmt.Sprintf("* Current: %+v", current)
logger.Ctx(ctx).Info(strings.Join([]string{header, expected, got}, " "))
fmt.Println(header)
fmt.Println(expected)
fmt.Println(got)
os.Exit(1)
}
func Fatal(ctx context.Context, msg string, err error) {
logger.CtxErr(ctx, err).Error("test failure: " + msg)
fmt.Println(msg+": ", err)
os.Exit(1)
}
func MustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
t, err := dttm.ExtractTime(name)
if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
Fatal(ctx, "extracting time from name: "+name, err)
}
return t, !errors.Is(err, dttm.ErrNoTimeString)
}
func IsWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool {
if hasTime {
if bound.Before(check) {
logger.Ctx(ctx).
With("boundary_time", bound, "check_time", check).
Info("skipping restore folder: not older than time bound")
return false
}
}
return true
}
func FilterSlice(sl []string, remove string) []string {
r := []string{}
for _, s := range sl {
if !strings.EqualFold(s, remove) {
r = append(r, s)
}
}
return r
}
func LogAndPrint(ctx context.Context, tmpl string, vs ...any) {
logger.Ctx(ctx).Infof(tmpl, vs...)
fmt.Printf(tmpl+"\n", vs...)
}
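These helpers replace testify-style assertions in the sanity binary: Assert evaluates a predicate and exits the process on the first failure instead of accumulating errors. A small usage sketch from a caller's perspective, with made-up values:

// Sketch: comparing an expected file size against what the drive reported.
var expected, got int64 = 1024, 512

common.Assert(
	ctx,
	func() bool { return expected == got },
	"different file size: report.txt", // header, printed with an "Error: " prefix on failure
	expected,
	got)
// On failure, Assert logs and prints the header plus the expected and
// current values, then calls os.Exit(1); on success it is a no-op.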


@ -0,0 +1,88 @@
package export
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/common/ptr"
)
func CheckOneDriveExport(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
userID, folderName, dataFolder string,
) {
drive, err := client.
Users().
ByUserId(userID).
Drive().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
// map file name -> file size
var (
fileSizes = make(map[string]int64)
exportFileSizes = make(map[string]int64)
startTime = time.Now()
)
err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
if err != nil {
return clues.Stack(err)
}
if info.IsDir() {
return nil
}
relPath, err := filepath.Rel(folderName, path)
if err != nil {
return clues.Stack(err)
}
exportFileSizes[relPath] = info.Size()
if startTime.After(info.ModTime()) {
startTime = info.ModTime()
}
return nil
})
if err != nil {
fmt.Println("Error walking the path:", err)
}
_ = restore.PopulateDriveDetails(
ctx,
client,
ptr.Val(drive.GetId()),
folderName,
dataFolder,
fileSizes,
map[string][]common.PermissionInfo{},
startTime)
for fileName, expected := range fileSizes {
common.LogAndPrint(ctx, "checking for file: %s", fileName)
got := exportFileSizes[fileName]
common.Assert(
ctx,
func() bool { return expected == got },
fmt.Sprintf("different file size: %s", fileName),
expected,
got)
}
fmt.Println("Success")
}


@ -0,0 +1,88 @@
package export
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/common/ptr"
)
func CheckSharePointExport(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
siteID, folderName, dataFolder string,
) {
drive, err := client.
Sites().
BySiteId(siteID).
Drive().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
// map file name -> file size
var (
fileSizes = make(map[string]int64)
exportFileSizes = make(map[string]int64)
startTime = time.Now()
)
err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
if err != nil {
return clues.Stack(err)
}
if info.IsDir() {
return nil
}
relPath, err := filepath.Rel(folderName, path)
if err != nil {
return clues.Stack(err)
}
exportFileSizes[relPath] = info.Size()
if startTime.After(info.ModTime()) {
startTime = info.ModTime()
}
return nil
})
if err != nil {
fmt.Println("Error walking the path:", err)
}
_ = restore.PopulateDriveDetails(
ctx,
client,
ptr.Val(drive.GetId()),
folderName,
dataFolder,
fileSizes,
map[string][]common.PermissionInfo{},
startTime)
for fileName, expected := range fileSizes {
common.LogAndPrint(ctx, "checking for file: %s", fileName)
got := exportFileSizes[fileName]
common.Assert(
ctx,
func() bool { return expected == got },
fmt.Sprintf("different file size: %s", fileName),
expected,
got)
}
fmt.Println("Success")
}


@ -0,0 +1,219 @@
package restore
import (
"context"
"fmt"
stdpath "path"
"strings"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/users"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/filters"
)
// CheckEmailRestoration verifies that the email count in the restored folder
// matches the email count in the original m365 account
func CheckEmailRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
testUser, folderName, dataFolder, baseBackupFolder string,
startTime time.Time,
) {
var (
restoreFolder models.MailFolderable
itemCount = make(map[string]int32)
restoreItemCount = make(map[string]int32)
builder = client.Users().ByUserId(testUser).MailFolders()
)
for {
result, err := builder.Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting mail folders", err)
}
values := result.GetValue()
for _, v := range values {
itemName := ptr.Val(v.GetDisplayName())
if itemName == folderName {
restoreFolder = v
continue
}
if itemName == dataFolder || itemName == baseBackupFolder {
// otherwise, recursively aggregate all child folders.
getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount)
itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
}
}
link, ok := ptr.ValOK(result.GetOdataNextLink())
if !ok {
break
}
builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
}
folderID := ptr.Val(restoreFolder.GetId())
folderName = ptr.Val(restoreFolder.GetDisplayName())
ctx = clues.Add(
ctx,
"restore_folder_id", folderID,
"restore_folder_name", folderName)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting restore folder child folders", err)
}
for _, fld := range childFolder.GetValue() {
restoreDisplayName := ptr.Val(fld.GetDisplayName())
// check if folder is the data folder we loaded or the base backup to verify
// the incremental backup worked fine
if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
count, _ := ptr.ValOK(fld.GetTotalItemCount())
restoreItemCount[restoreDisplayName] = count
checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
}
}
verifyEmailData(ctx, restoreItemCount, itemCount)
}
func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
for fldName, expected := range messageCount {
got := restoreMessageCount[fldName]
common.Assert(
ctx,
func() bool { return expected == got },
fmt.Sprintf("Restore item counts do not match: %s", fldName),
expected,
got)
}
}
// getAllMailSubFolders recursively checks all subfolders and records the
// corresponding email count.
func getAllMailSubFolders(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
testUser string,
r models.MailFolderable,
parentFolder,
dataFolder string,
messageCount map[string]int32,
) {
var (
folderID = ptr.Val(r.GetId())
count int32 = 99
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
Top: &count,
},
}
)
ctx = clues.Add(ctx, "parent_folder_id", folderID)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, options)
if err != nil {
common.Fatal(ctx, "getting mail subfolders", err)
}
for _, child := range childFolder.GetValue() {
var (
childDisplayName = ptr.Val(child.GetDisplayName())
childFolderCount = ptr.Val(child.GetChildFolderCount())
//nolint:forbidigo
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
// recursively check for subfolders
if childFolderCount > 0 {
parentFolder := fullFolderName
getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
}
}
}
}
// checkAllSubFolder recursively traverses the restore folder and
// verifies that the data matches in all subfolders
func checkAllSubFolder(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
r models.MailFolderable,
testUser,
parentFolder,
dataFolder string,
restoreMessageCount map[string]int32,
) {
var (
folderID = ptr.Val(r.GetId())
count int32 = 99
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
Top: &count,
},
}
)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, options)
if err != nil {
common.Fatal(ctx, "getting mail subfolders", err)
}
for _, child := range childFolder.GetValue() {
var (
childDisplayName = ptr.Val(child.GetDisplayName())
//nolint:forbidigo
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
restoreMessageCount[fullFolderName] = childTotalCount
}
childFolderCount := ptr.Val(child.GetChildFolderCount())
if childFolderCount > 0 {
parentFolder := fullFolderName
checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
}
}
}


@ -0,0 +1,369 @@
package restore
import (
"context"
"fmt"
"strings"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/path"
)
const (
owner = "owner"
)
func CheckOneDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
userID, folderName, dataFolder string,
startTime time.Time,
) {
drive, err := client.
Users().
ByUserId(userID).
Drive().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
checkDriveRestoration(
ctx,
client,
path.OneDriveService,
folderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
false)
}
func checkDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
service path.ServiceType,
folderName,
driveID,
driveName,
dataFolder string,
startTime time.Time,
skipPermissionTest bool,
) {
var (
// map item name -> item size
fileSizes = make(map[string]int64)
// map folder name -> []permission info
folderPermissions = make(map[string][]common.PermissionInfo)
restoreFile = make(map[string]int64)
restoredFolderPermissions = make(map[string][]common.PermissionInfo)
)
ctx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
restoreFolderID := PopulateDriveDetails(
ctx,
client,
driveID,
folderName,
dataFolder,
fileSizes,
folderPermissions,
startTime)
getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime)
checkRestoredDriveItemPermissions(
ctx,
service,
skipPermissionTest,
folderPermissions,
restoredFolderPermissions)
for fileName, expected := range fileSizes {
common.LogAndPrint(ctx, "checking for file: %s", fileName)
got := restoreFile[fileName]
common.Assert(
ctx,
func() bool { return expected == got },
fmt.Sprintf("different file size: %s", fileName),
expected,
got)
}
fmt.Println("Success")
}
func PopulateDriveDetails(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
driveID, folderName, dataFolder string,
fileSizes map[string]int64,
folderPermissions map[string][]common.PermissionInfo,
startTime time.Time,
) string {
var restoreFolderID string
response, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId("root").
Children().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting drive by id", err)
}
for _, driveItem := range response.GetValue() {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
)
if itemName == folderName {
restoreFolderID = itemID
continue
}
if itemName != dataFolder {
common.LogAndPrint(ctx, "test data for folder: %s", dataFolder)
continue
}
// if it's a file check the size
if driveItem.GetFile() != nil {
fileSizes[itemName] = ptr.Val(driveItem.GetSize())
}
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
continue
}
// currently we don't restore blank folders.
// skip permission check for empty folders
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
common.LogAndPrint(ctx, "skipped empty folder: %s", itemName)
continue
}
folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime)
}
return restoreFolderID
}
func checkRestoredDriveItemPermissions(
ctx context.Context,
service path.ServiceType,
skip bool,
folderPermissions map[string][]common.PermissionInfo,
restoredFolderPermissions map[string][]common.PermissionInfo,
) {
if skip {
return
}
/**
TODO: replace this check with testElementsMatch
from internal/connecter/graph_connector_helper_test.go
**/
for folderName, permissions := range folderPermissions {
common.LogAndPrint(ctx, "checking for folder: %s", folderName)
restoreFolderPerm := restoredFolderPermissions[folderName]
if len(permissions) < 1 {
common.LogAndPrint(ctx, "no permissions found in: %s", folderName)
continue
}
permCheck := func() bool { return len(permissions) == len(restoreFolderPerm) }
if service == path.SharePointService {
permCheck = func() bool { return len(permissions) <= len(restoreFolderPerm) }
}
common.Assert(
ctx,
permCheck,
fmt.Sprintf("wrong number of restored permissions: %s", folderName),
permissions,
restoreFolderPerm)
for _, perm := range permissions {
eqID := func(pi common.PermissionInfo) bool { return strings.EqualFold(pi.EntityID, perm.EntityID) }
i := slices.IndexFunc(restoreFolderPerm, eqID)
common.Assert(
ctx,
func() bool { return i >= 0 },
fmt.Sprintf("permission was restored in: %s", folderName),
perm.EntityID,
restoreFolderPerm)
// roles are collected in sorted order, so the two slices can be compared directly
restored := restoreFolderPerm[i]
common.Assert(
ctx,
func() bool { return slices.Equal(perm.Roles, restored.Roles) },
fmt.Sprintf("different roles restored: %s", folderName),
perm.Roles,
restored.Roles)
}
}
}
func getOneDriveChildFolder(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
driveID, itemID, parentName string,
fileSizes map[string]int64,
folderPermission map[string][]common.PermissionInfo,
startTime time.Time,
) {
response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting child folder", err)
}
for _, driveItem := range response.GetValue() {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
fullName = parentName + "/" + itemName
)
folderTime, hasTime := common.MustGetTimeFromName(ctx, itemName)
if !common.IsWithinTimeBound(ctx, startTime, folderTime, hasTime) {
continue
}
// if it's a file check the size
if driveItem.GetFile() != nil {
fileSizes[fullName] = ptr.Val(driveItem.GetSize())
}
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
continue
}
// currently we don't restore blank folders.
// skip permission check for empty folders
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
common.LogAndPrint(ctx, "skipped empty folder: %s", fullName)
continue
}
folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
}
}
func getRestoredDrive(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
driveID, restoreFolderID string,
restoreFile map[string]int64,
restoreFolder map[string][]common.PermissionInfo,
startTime time.Time,
) {
restored, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(restoreFolderID).
Children().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting child folder", err)
}
for _, item := range restored.GetValue() {
var (
itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName())
itemSize = ptr.Val(item.GetSize())
)
if item.GetFile() != nil {
restoreFile[itemName] = itemSize
continue
}
if item.GetFolder() == nil && item.GetPackage() == nil {
continue
}
restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime)
}
}
// ---------------------------------------------------------------------------
// permission helpers
// ---------------------------------------------------------------------------
func permissionIn(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
driveID, itemID string,
) []common.PermissionInfo {
pi := []common.PermissionInfo{}
pcr, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Permissions().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting permission", err)
}
for _, perm := range pcr.GetValue() {
if perm.GetGrantedToV2() == nil {
continue
}
var (
gv2 = perm.GetGrantedToV2()
permInfo = common.PermissionInfo{}
entityID string
)
// TODO: replace with filterUserPermissions in onedrive item.go
if gv2.GetUser() != nil {
entityID = ptr.Val(gv2.GetUser().GetId())
} else if gv2.GetGroup() != nil {
entityID = ptr.Val(gv2.GetGroup().GetId())
}
roles := common.FilterSlice(perm.GetRoles(), owner)
for _, role := range roles {
permInfo.EntityID = entityID
permInfo.Roles = append(permInfo.Roles, role)
}
if len(roles) > 0 {
slices.Sort(permInfo.Roles)
pi = append(pi, permInfo)
}
}
return pi
}
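permissionIn drops the owner role and sorts what remains, so the restore checks above can compare role slices directly. A small illustration of that contract, with made-up inputs:

// FilterSlice removes entries case-insensitively; sorting fixes the order.
roles := common.FilterSlice([]string{"owner", "write", "read"}, "owner")
slices.Sort(roles)
// roles == []string{"read", "write"}; "owner" never participates in
// the restored-permission comparison.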


@ -0,0 +1,39 @@
package restore
import (
"context"
"time"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/path"
)
func CheckSharePointRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
siteID, userID, folderName, dataFolder string,
startTime time.Time,
) {
drive, err := client.
Sites().
BySiteId(siteID).
Drive().
Get(ctx, nil)
if err != nil {
common.Fatal(ctx, "getting the drive:", err)
}
checkDriveRestoration(
ctx,
client,
path.SharePointService,
folderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
true)
}


@ -2,45 +2,21 @@ package main
import (
"context"
"os"
"strings"
"time"
"github.com/alcionai/clues"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/alcionai/corso/src/cmd/sanity_test/common"
"github.com/alcionai/corso/src/cmd/sanity_test/export"
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/logger"
)
func main() {
ls := logger.Settings{
File: logger.GetLogFile(""),
@ -60,16 +36,16 @@ func main() {
os.Getenv("AZURE_CLIENT_ID"), os.Getenv("AZURE_CLIENT_ID"),
os.Getenv("AZURE_CLIENT_SECRET")) os.Getenv("AZURE_CLIENT_SECRET"))
if err != nil { if err != nil {
fatal(ctx, "creating adapter", err) common.Fatal(ctx, "creating adapter", err)
} }
var ( var (
client = msgraphsdk.NewGraphServiceClient(adapter) client = msgraphsdk.NewGraphServiceClient(adapter)
testUser = tconfig.GetM365UserID(ctx) testUser = tconfig.GetM365UserID(ctx)
testSite = tconfig.GetM365SiteID(ctx) testSite = tconfig.GetM365SiteID(ctx)
testService = os.Getenv("SANITY_RESTORE_SERVICE") testKind = os.Getenv("SANITY_TEST_KIND") // restore or export (cli arg?)
folder = strings.TrimSpace(os.Getenv("SANITY_RESTORE_FOLDER")) testService = os.Getenv("SANITY_TEST_SERVICE")
startTime, _ = mustGetTimeFromName(ctx, folder) folder = strings.TrimSpace(os.Getenv("SANITY_TEST_FOLDER"))
dataFolder = os.Getenv("TEST_DATA") dataFolder = os.Getenv("TEST_DATA")
baseBackupFolder = os.Getenv("BASE_BACKUP") baseBackupFolder = os.Getenv("BASE_BACKUP")
) )
@ -78,664 +54,35 @@ func main() {
ctx, ctx,
"resource_owner", testUser, "resource_owner", testUser,
"service", testService, "service", testService,
"sanity_restore_folder", folder, "sanity_restore_folder", folder)
"start_time", startTime.Format(time.RFC3339Nano))
logger.Ctx(ctx).Info("starting sanity test check") logger.Ctx(ctx).Info("starting sanity test check")
switch testService { switch testKind {
case "exchange": case "restore":
checkEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime) startTime, _ := common.MustGetTimeFromName(ctx, folder)
case "onedrive": clues.Add(ctx, "sanity_restore_start_time", startTime.Format(time.RFC3339))
checkOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
case "sharepoint": switch testService {
checkSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime) case "exchange":
restore.CheckEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
case "onedrive":
restore.CheckOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
case "sharepoint":
restore.CheckSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
default:
common.Fatal(ctx, "unknown service for restore sanity tests", nil)
}
case "export":
switch testService {
case "onedrive":
export.CheckOneDriveExport(ctx, client, testUser, folder, dataFolder)
case "sharepoint":
export.CheckSharePointExport(ctx, client, testSite, folder, dataFolder)
default:
common.Fatal(ctx, "unknown service for export sanity tests", nil)
}
default: default:
fatal(ctx, "no service specified", nil) common.Fatal(ctx, "unknown test kind (expected restore or export)", nil)
} }
} }
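For the restore path above, the time bound is derived from the restored folder's own name via common.MustGetTimeFromName (defined with the other helpers later in this diff), which wraps dttm.ExtractTime. A standalone sketch of that extraction; the folder-name format here is an assumption, chosen because dttm can format and parse its own timestamps:

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/internal/common/dttm"
)

func main() {
	// Hypothetical restore-destination name carrying a timestamp in a
	// format dttm knows how to emit; real destination names may differ.
	folder := "Corso_Restore_" + dttm.FormatNow(dttm.HumanReadable)

	t, err := dttm.ExtractTime(folder)
	if err != nil {
		// dttm.ErrNoTimeString would mean the name carries no timestamp.
		fmt.Println("no timestamp in name:", err)
		return
	}

	fmt.Println("restore start bound:", t)
}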
// ---------------------------------------------------------------------------
// exchange
// ---------------------------------------------------------------------------
// checkEmailRestoration verifies that the email count in the restored folder
// matches the email count in the actual M365 account
func checkEmailRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
testUser, folderName, dataFolder, baseBackupFolder string,
startTime time.Time,
) {
var (
restoreFolder models.MailFolderable
itemCount = make(map[string]int32)
restoreItemCount = make(map[string]int32)
builder = client.Users().ByUserId(testUser).MailFolders()
)
for {
result, err := builder.Get(ctx, nil)
if err != nil {
fatal(ctx, "getting mail folders", err)
}
values := result.GetValue()
for _, v := range values {
itemName := ptr.Val(v.GetDisplayName())
if itemName == folderName {
restoreFolder = v
continue
}
if itemName == dataFolder || itemName == baseBackupFolder {
// recursively aggregate all child folders of the data and base backup folders.
getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount)
itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
}
}
link, ok := ptr.ValOK(result.GetOdataNextLink())
if !ok {
break
}
builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
}
folderID := ptr.Val(restoreFolder.GetId())
folderName = ptr.Val(restoreFolder.GetDisplayName())
ctx = clues.Add(
ctx,
"restore_folder_id", folderID,
"restore_folder_name", folderName)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, nil)
if err != nil {
fatal(ctx, "getting restore folder child folders", err)
}
for _, fld := range childFolder.GetValue() {
restoreDisplayName := ptr.Val(fld.GetDisplayName())
// check if folder is the data folder we loaded or the base backup to verify
// the incremental backup worked fine
if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
count, _ := ptr.ValOK(fld.GetTotalItemCount())
restoreItemCount[restoreDisplayName] = count
checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
}
}
verifyEmailData(ctx, restoreItemCount, itemCount)
}
func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
for fldName, expected := range messageCount {
got := restoreMessageCount[fldName]
assert(
ctx,
func() bool { return expected == got },
fmt.Sprintf("Restore item counts do not match: %s", fldName),
expected,
got)
}
}
// getAllMailSubFolders recursively checks all subfolders and records the
// corresponding email counts.
func getAllMailSubFolders(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
testUser string,
r models.MailFolderable,
parentFolder,
dataFolder string,
messageCount map[string]int32,
) {
var (
folderID = ptr.Val(r.GetId())
count int32 = 99
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
Top: &count,
},
}
)
ctx = clues.Add(ctx, "parent_folder_id", folderID)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, options)
if err != nil {
fatal(ctx, "getting mail subfolders", err)
}
for _, child := range childFolder.GetValue() {
var (
childDisplayName = ptr.Val(child.GetDisplayName())
childFolderCount = ptr.Val(child.GetChildFolderCount())
//nolint:forbidigo
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
// recursively check for subfolders
if childFolderCount > 0 {
parentFolder := fullFolderName
getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
}
}
}
}
// checkAllSubFolder recursively traverses the restore folder and
// verifies that the data matches in all subfolders
func checkAllSubFolder(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
r models.MailFolderable,
testUser,
parentFolder,
dataFolder string,
restoreMessageCount map[string]int32,
) {
var (
folderID = ptr.Val(r.GetId())
count int32 = 99
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
Top: &count,
},
}
)
childFolder, err := client.
Users().
ByUserId(testUser).
MailFolders().
ByMailFolderId(folderID).
ChildFolders().
Get(ctx, options)
if err != nil {
fatal(ctx, "getting mail subfolders", err)
}
for _, child := range childFolder.GetValue() {
var (
childDisplayName = ptr.Val(child.GetDisplayName())
//nolint:forbidigo
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
)
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
restoreMessageCount[fullFolderName] = childTotalCount
}
childFolderCount := ptr.Val(child.GetChildFolderCount())
if childFolderCount > 0 {
parentFolder := fullFolderName
checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
}
}
}
// ---------------------------------------------------------------------------
// oneDrive
// ---------------------------------------------------------------------------
func checkOneDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
userID, folderName, dataFolder string,
startTime time.Time,
) {
drive, err := client.
Users().
ByUserId(userID).
Drive().
Get(ctx, nil)
if err != nil {
fatal(ctx, "getting the drive:", err)
}
checkDriveRestoration(
ctx,
client,
path.OneDriveService,
folderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
false)
}
// ---------------------------------------------------------------------------
// sharePoint
// ---------------------------------------------------------------------------
func checkSharePointRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
siteID, userID, folderName, dataFolder string,
startTime time.Time,
) {
drive, err := client.
Sites().
BySiteId(siteID).
Drive().
Get(ctx, nil)
if err != nil {
fatal(ctx, "getting the drive:", err)
}
checkDriveRestoration(
ctx,
client,
path.SharePointService,
folderName,
ptr.Val(drive.GetId()),
ptr.Val(drive.GetName()),
dataFolder,
startTime,
true)
}
// ---------------------------------------------------------------------------
// shared drive tests
// ---------------------------------------------------------------------------
func checkDriveRestoration(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
service path.ServiceType,
folderName,
driveID,
driveName,
dataFolder string,
startTime time.Time,
skipPermissionTest bool,
) {
var (
// map itemID -> item size
fileSizes = make(map[string]int64)
// map itemID -> permission id -> []permission roles
folderPermissions = make(map[string][]permissionInfo)
restoreFile = make(map[string]int64)
restoredFolderPermissions = make(map[string][]permissionInfo)
)
var restoreFolderID string
ctx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
response, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId("root").
Children().
Get(ctx, nil)
if err != nil {
fatal(ctx, "getting drive by id", err)
}
for _, driveItem := range response.GetValue() {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
)
if itemName == folderName {
restoreFolderID = itemID
continue
}
if itemName != dataFolder {
logAndPrint(ctx, "test data for folder: %s", dataFolder)
continue
}
// if it's a file check the size
if driveItem.GetFile() != nil {
fileSizes[itemName] = ptr.Val(driveItem.GetSize())
}
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
continue
}
// currently we don't restore blank folders.
// skip permission check for empty folders
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
logAndPrint(ctx, "skipped empty folder: %s", itemName)
continue
}
folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime)
}
getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime)
checkRestoredDriveItemPermissions(
ctx,
service,
skipPermissionTest,
folderPermissions,
restoredFolderPermissions)
for fileName, expected := range fileSizes {
logAndPrint(ctx, "checking for file: %s", fileName)
got := restoreFile[fileName]
assert(
ctx,
func() bool { return expected == got },
fmt.Sprintf("different file size: %s", fileName),
expected,
got)
}
fmt.Println("Success")
}
func checkRestoredDriveItemPermissions(
ctx context.Context,
service path.ServiceType,
skip bool,
folderPermissions map[string][]permissionInfo,
restoredFolderPermissions map[string][]permissionInfo,
) {
if skip {
return
}
/**
TODO: replace this check with testElementsMatch
from internal/connector/graph_connector_helper_test.go
**/
for folderName, permissions := range folderPermissions {
logAndPrint(ctx, "checking for folder: %s", folderName)
restoreFolderPerm := restoredFolderPermissions[folderName]
if len(permissions) < 1 {
logAndPrint(ctx, "no permissions found in: %s", folderName)
continue
}
permCheck := func() bool { return len(permissions) == len(restoreFolderPerm) }
if service == path.SharePointService {
permCheck = func() bool { return len(permissions) <= len(restoreFolderPerm) }
}
assert(
ctx,
permCheck,
fmt.Sprintf("wrong number of restored permissions: %s", folderName),
permissions,
restoreFolderPerm)
for _, perm := range permissions {
eqID := func(pi permissionInfo) bool { return strings.EqualFold(pi.entityID, perm.entityID) }
i := slices.IndexFunc(restoreFolderPerm, eqID)
assert(
ctx,
func() bool { return i >= 0 },
fmt.Sprintf("permission was restored in: %s", folderName),
perm.entityID,
restoreFolderPerm)
// permissions should be sorted, so a by-index comparison works
restored := restoreFolderPerm[i]
assert(
ctx,
func() bool { return slices.Equal(perm.roles, restored.roles) },
fmt.Sprintf("different roles restored: %s", folderName),
perm.roles,
restored.roles)
}
}
}
func getOneDriveChildFolder(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
driveID, itemID, parentName string,
fileSizes map[string]int64,
folderPermission map[string][]permissionInfo,
startTime time.Time,
) {
response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil)
if err != nil {
fatal(ctx, "getting child folder", err)
}
for _, driveItem := range response.GetValue() {
var (
itemID = ptr.Val(driveItem.GetId())
itemName = ptr.Val(driveItem.GetName())
fullName = parentName + "/" + itemName
)
folderTime, hasTime := mustGetTimeFromName(ctx, itemName)
if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) {
continue
}
// if it's a file check the size
if driveItem.GetFile() != nil {
fileSizes[fullName] = ptr.Val(driveItem.GetSize())
}
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
continue
}
// currently we don't restore blank folders.
// skip permission check for empty folders
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
logAndPrint(ctx, "skipped empty folder: %s", fullName)
continue
}
folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
}
}
func getRestoredDrive(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
driveID, restoreFolderID string,
restoreFile map[string]int64,
restoreFolder map[string][]permissionInfo,
startTime time.Time,
) {
restored, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(restoreFolderID).
Children().
Get(ctx, nil)
if err != nil {
fatal(ctx, "getting child folder", err)
}
for _, item := range restored.GetValue() {
var (
itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName())
itemSize = ptr.Val(item.GetSize())
)
if item.GetFile() != nil {
restoreFile[itemName] = itemSize
continue
}
if item.GetFolder() == nil && item.GetPackage() == nil {
continue
}
restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID)
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime)
}
}
// ---------------------------------------------------------------------------
// permission helpers
// ---------------------------------------------------------------------------
func permissionIn(
ctx context.Context,
client *msgraphsdk.GraphServiceClient,
driveID, itemID string,
) []permissionInfo {
pi := []permissionInfo{}
pcr, err := client.
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Permissions().
Get(ctx, nil)
if err != nil {
fatal(ctx, "getting permission", err)
}
for _, perm := range pcr.GetValue() {
if perm.GetGrantedToV2() == nil {
continue
}
var (
gv2 = perm.GetGrantedToV2()
permInfo = permissionInfo{}
entityID string
)
// TODO: replace with filterUserPermissions in onedrive item.go
if gv2.GetUser() != nil {
entityID = ptr.Val(gv2.GetUser().GetId())
} else if gv2.GetGroup() != nil {
entityID = ptr.Val(gv2.GetGroup().GetId())
}
roles := filterSlice(perm.GetRoles(), owner)
for _, role := range roles {
permInfo.entityID = entityID
permInfo.roles = append(permInfo.roles, role)
}
if len(roles) > 0 {
slices.Sort(permInfo.roles)
pi = append(pi, permInfo)
}
}
return pi
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
func fatal(ctx context.Context, msg string, err error) {
logger.CtxErr(ctx, err).Error("test failure: " + msg)
fmt.Println(msg+": ", err)
os.Exit(1)
}
func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
t, err := dttm.ExtractTime(name)
if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
fatal(ctx, "extracting time from name: "+name, err)
}
return t, !errors.Is(err, dttm.ErrNoTimeString)
}
func isWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool {
if hasTime {
if bound.Before(check) {
logger.Ctx(ctx).
With("boundary_time", bound, "check_time", check).
Info("skipping restore folder: not older than time bound")
return false
}
}
return true
}
func filterSlice(sl []string, remove string) []string {
r := []string{}
for _, s := range sl {
if !strings.EqualFold(s, remove) {
r = append(r, s)
}
}
return r
}
func assert(
ctx context.Context,
passes func() bool,
header string,
expect, current any,
) {
if passes() {
return
}
header = "Error: " + header
expected := fmt.Sprintf("* Expected: %+v", expect)
got := fmt.Sprintf("* Current: %+v", current)
logger.Ctx(ctx).Info(strings.Join([]string{header, expected, got}, " "))
fmt.Println(header)
fmt.Println(expected)
fmt.Println(got)
os.Exit(1)
}
func logAndPrint(ctx context.Context, tmpl string, vs ...any) {
logger.Ctx(ctx).Infof(tmpl, vs...)
fmt.Printf(tmpl+"\n", vs...)
}

View File

@ -0,0 +1,82 @@
package utils
import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/pkg/logger"
)
func Assert(
ctx context.Context,
passes func() bool,
header string,
expect, current any,
) {
if passes() {
return
}
header = "Error: " + header
expected := fmt.Sprintf("* Expected: %+v", expect)
got := fmt.Sprintf("* Current: %+v", current)
logger.Ctx(ctx).Info(strings.Join([]string{header, expected, got}, " "))
fmt.Println(header)
fmt.Println(expected)
fmt.Println(got)
os.Exit(1)
}
func Fatal(ctx context.Context, msg string, err error) {
logger.CtxErr(ctx, err).Error("test failure: " + msg)
fmt.Println(msg+": ", err)
os.Exit(1)
}
func MustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
t, err := dttm.ExtractTime(name)
if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
Fatal(ctx, "extracting time from name: "+name, err)
}
return t, !errors.Is(err, dttm.ErrNoTimeString)
}
func IsWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool {
if hasTime {
if bound.Before(check) {
logger.Ctx(ctx).
With("boundary_time", bound, "check_time", check).
Info("skipping restore folder: not older than time bound")
return false
}
}
return true
}
func FilterSlice(sl []string, remove string) []string {
r := []string{}
for _, s := range sl {
if !strings.EqualFold(s, remove) {
r = append(r, s)
}
}
return r
}
func LogAndPrint(ctx context.Context, tmpl string, vs ...any) {
logger.Ctx(ctx).Infof(tmpl, vs...)
fmt.Printf(tmpl+"\n", vs...)
}
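These helpers keep the check call sites flat: Assert and Fatal terminate the process, so verification reads as straight-line code. A hedged sketch of composing them; the import path is assumed to be the sanity_test/common package these same helpers are referenced through elsewhere in this diff, and all values are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
)

func main() {
	ctx := context.Background()

	// Drop the implicit owner role before comparing role sets.
	roles := common.FilterSlice([]string{"owner", "read", "write"}, "owner")

	expected, got := 2, len(roles)

	// Logs, prints, and exits non-zero if the predicate fails.
	common.Assert(
		ctx,
		func() bool { return expected == got },
		"unexpected role count",
		expected,
		got)

	fmt.Println("roles:", roles)
}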

View File

@ -5,21 +5,21 @@ go 1.20
replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377 replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377
require ( require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4
github.com/armon/go-metrics v0.4.1 github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go v1.44.302 github.com/aws/aws-sdk-go v1.44.311
github.com/aws/aws-xray-sdk-go v1.8.1 github.com/aws/aws-xray-sdk-go v1.8.1
github.com/cenkalti/backoff/v4 v4.2.1 github.com/cenkalti/backoff/v4 v4.2.1
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.0
github.com/h2non/gock v1.2.0 github.com/h2non/gock v1.2.0
github.com/kopia/kopia v0.12.2-0.20230327171220-747baeebdab1 github.com/kopia/kopia v0.12.2-0.20230327171220-747baeebdab1
github.com/microsoft/kiota-abstractions-go v1.0.0 github.com/microsoft/kiota-abstractions-go v1.1.0
github.com/microsoft/kiota-authentication-azure-go v1.0.0 github.com/microsoft/kiota-authentication-azure-go v1.0.0
github.com/microsoft/kiota-http-go v1.0.0 github.com/microsoft/kiota-http-go v1.0.1
github.com/microsoft/kiota-serialization-form-go v1.0.0 github.com/microsoft/kiota-serialization-form-go v1.0.0
github.com/microsoft/kiota-serialization-json-go v1.0.2 github.com/microsoft/kiota-serialization-json-go v1.0.4
github.com/microsoftgraph/msgraph-sdk-go v1.4.0 github.com/microsoftgraph/msgraph-sdk-go v1.12.0
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/puzpuzpuz/xsync/v2 v2.4.1 github.com/puzpuzpuz/xsync/v2 v2.4.1
@ -42,7 +42,6 @@ require (
github.com/VividCortex/ewma v1.2.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/andybalholm/brotli v1.0.4 // indirect github.com/andybalholm/brotli v1.0.4 // indirect
github.com/dnaeon/go-vcr v1.2.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect github.com/gofrs/flock v0.8.1 // indirect
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
@ -62,9 +61,9 @@ require (
) )
require ( require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect

View File

@ -36,14 +36,14 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
@ -53,8 +53,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a h1:mtJyeK/FhArTn06M5Lfgxk/GWnu8yqCGNN1BY16vjaA= github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 h1:husF7eAYw2HEzgjfAmNy+ZLzyztJV2SyoUngSUo829Y=
github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a/go.mod h1:MLEWSZ0cjEMg6hiGCRvE7AtrOhs7deBcm7ZrJBpfGRM= github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4/go.mod h1:MLEWSZ0cjEMg6hiGCRvE7AtrOhs7deBcm7ZrJBpfGRM=
github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377 h1:w50/aVU+zRP5lvE86TSSCCYrrEyuXOlJA06R5RdTS8Y= github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377 h1:w50/aVU+zRP5lvE86TSSCCYrrEyuXOlJA06R5RdTS8Y=
github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377/go.mod h1:WH725ws0BYpZpTkVh4uqFHHPiiJuirl1Cm73jv5RYyA= github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377/go.mod h1:WH725ws0BYpZpTkVh4uqFHHPiiJuirl1Cm73jv5RYyA=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -66,12 +66,11 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk= github.com/aws/aws-sdk-go v1.44.311 h1:60i8hyVMOXqabKJQPCq4qKRBQ6hRafI/WOcDxGM+J7Q=
github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.44.311/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -103,7 +102,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
@ -124,7 +122,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@ -227,7 +224,6 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@ -235,7 +231,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
@ -276,20 +271,20 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microsoft/kiota-abstractions-go v1.0.0 h1:teQS3yOmcTyps+O48AD17LI8TR1B3wCEwGFcwC6K75c= github.com/microsoft/kiota-abstractions-go v1.1.0 h1:X1aKlsYCRs/0RSChr/fbq4j/+kxRzbSY5GeWhtHQNYI=
github.com/microsoft/kiota-abstractions-go v1.0.0/go.mod h1:2yaRQnx2KU7UaenYSApiTT4pf7fFkPV0B71Rm2uYynQ= github.com/microsoft/kiota-abstractions-go v1.1.0/go.mod h1:RkxyZ5x87Njik7iVeQY9M2wtrrL1MJZcXiI/BxD/82g=
github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk= github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk=
github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw= github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw=
github.com/microsoft/kiota-http-go v1.0.0 h1:F1hd6gMlLeEgH2CkRB7z13ow7LxMKMWEmms/t0VfS+k= github.com/microsoft/kiota-http-go v1.0.1 h1:818u3aiLpxj35hZgfUSqphQ18IUTK3gVdTE4cQ5vjLw=
github.com/microsoft/kiota-http-go v1.0.0/go.mod h1:eujxJliqodotsYepIc6ihhK+vXMMt5Q8YiSNL7+7M7U= github.com/microsoft/kiota-http-go v1.0.1/go.mod h1:H0cg+ly+5ZSR8z4swj5ea9O/GB5ll2YuYeQ0/pJs7AY=
github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI= github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI=
github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA= github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA=
github.com/microsoft/kiota-serialization-json-go v1.0.2 h1:RXan8v7yWBD88XxVZ2W38BBcqu2UqWtgS54nCbOS5ow= github.com/microsoft/kiota-serialization-json-go v1.0.4 h1:5TaISWwd2Me8clrK7SqNATo0tv9seOq59y4I5953egQ=
github.com/microsoft/kiota-serialization-json-go v1.0.2/go.mod h1:AUItT9exyxmjZQE8IeFD9ygP77q9GKVb+AQE2V5Ikho= github.com/microsoft/kiota-serialization-json-go v1.0.4/go.mod h1:rM4+FsAY+9AEpBsBzkFFis+b/LZLlNKKewuLwK9Q6Mg=
github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA= github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M= github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
github.com/microsoftgraph/msgraph-sdk-go v1.4.0 h1:ibNwMDEZ6HikA9BVXu+TljCzCiE+yFsD6wLpJbTc1tc= github.com/microsoftgraph/msgraph-sdk-go v1.12.0 h1:/jZJ1KCtVlvxStKq31VsEPOQQ5Iy26R1pgvc+RYt7XI=
github.com/microsoftgraph/msgraph-sdk-go v1.4.0/go.mod h1:JIDL1xENx92B60NjO2ACyqGeKvtYkdl9rirgajIgryw= github.com/microsoftgraph/msgraph-sdk-go v1.12.0/go.mod h1:ccLv84FJFtwdSzYWM/HlTes5FLzkzzBsYh9kg93/WS8=
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY=
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@ -307,9 +302,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
@ -446,7 +439,6 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLk
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
@ -798,7 +790,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -0,0 +1,99 @@
package archive
import (
"archive/zip"
"context"
"io"
"path"
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/pkg/export"
)
const (
// ZipCopyBufferSize is the size of the copy buffer for zip
// write operations
// TODO(meain): tweak this value
ZipCopyBufferSize = 5 * 1024 * 1024
)
type zipCollection struct {
reader io.ReadCloser
}
func (z zipCollection) BasePath() string {
return ""
}
func (z zipCollection) Items(ctx context.Context) <-chan export.Item {
rc := make(chan export.Item, 1)
defer close(rc)
rc <- export.Item{
Data: export.ItemData{
Name: "Corso_Export_" + dttm.FormatNow(dttm.HumanReadable) + ".zip",
Body: z.reader,
},
}
return rc
}
// ZipExportCollection takes a list of export collections and zips
// them into a single collection.
func ZipExportCollection(
ctx context.Context,
expCollections []export.Collection,
) (export.Collection, error) {
if len(expCollections) == 0 {
return nil, clues.New("no export collections provided")
}
reader, writer := io.Pipe()
wr := zip.NewWriter(writer)
go func() {
defer writer.Close()
defer wr.Close()
buf := make([]byte, ZipCopyBufferSize)
for _, ec := range expCollections {
folder := ec.BasePath()
items := ec.Items(ctx)
for item := range items {
err := item.Error
if err != nil {
writer.CloseWithError(clues.Wrap(err, "getting export item").With("id", item.ID))
return
}
name := item.Data.Name
// We assume folder and name do not contain any path separators.
// Also, this should always use `/` as this is
// created within a zip file and not written to disk.
// TODO(meain): Exchange paths might contain a path
// separator and will need special handling.
//nolint:forbidigo
f, err := wr.Create(path.Join(folder, name))
if err != nil {
writer.CloseWithError(clues.Wrap(err, "creating zip entry").With("name", name).With("id", item.ID))
return
}
_, err = io.CopyBuffer(f, item.Data.Body, buf)
if err != nil {
writer.CloseWithError(clues.Wrap(err, "writing zip entry").With("name", name).With("id", item.ID))
return
}
}
}
}()
return zipCollection{reader}, nil
}
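The zip writer runs in a goroutine against an io.Pipe, so the archive is produced lazily: bytes are generated only as the single item's Body is drained, and a mid-stream failure surfaces through CloseWithError on the reader. A hedged sketch of a consumer; the archive import path is a guess from the package name, and the output file lands in the working directory:

package sketch

import (
	"context"
	"io"
	"os"

	"github.com/alcionai/corso/src/internal/archive"
	"github.com/alcionai/corso/src/pkg/export"
)

func writeZip(ctx context.Context, cs []export.Collection) error {
	zc, err := archive.ZipExportCollection(ctx, cs)
	if err != nil {
		return err
	}

	// The zipped collection yields exactly one item: the archive itself.
	for item := range zc.Items(ctx) {
		if item.Error != nil {
			return item.Error
		}

		f, err := os.Create(item.Data.Name)
		if err != nil {
			return err
		}

		// Copying drives the zip-writing goroutine; errors from any
		// inner collection arrive here through the pipe.
		if _, err := io.Copy(f, item.Data.Body); err != nil {
			f.Close()
			return err
		}

		if err := f.Close(); err != nil {
			return err
		}
	}

	return nil
}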

View File

@ -28,6 +28,10 @@ type is struct {
name string name string
} }
func NewProvider(id, name string) *is {
return &is{id, name}
}
func (is is) ID() string { return is.id } func (is is) ID() string { return is.id }
func (is is) Name() string { return is.name } func (is is) Name() string { return is.name }
@ -40,6 +44,11 @@ type Cacher interface {
ProviderForName(id string) Provider ProviderForName(id string) Provider
} }
type CacheBuilder interface {
Add(id, name string)
Cacher
}
var _ Cacher = &cache{} var _ Cacher = &cache{}
type cache struct { type cache struct {
@ -47,17 +56,29 @@ type cache struct {
nameToID map[string]string nameToID map[string]string
} }
func NewCache(idToName map[string]string) cache { func NewCache(idToName map[string]string) *cache {
nti := make(map[string]string, len(idToName)) c := cache{
idToName: map[string]string{},
for id, name := range idToName { nameToID: map[string]string{},
nti[name] = id
} }
return cache{ if len(idToName) > 0 {
idToName: idToName, nti := make(map[string]string, len(idToName))
nameToID: nti,
for id, name := range idToName {
nti[name] = id
}
c.idToName = idToName
c.nameToID = nti
} }
return &c
}
func (c *cache) Add(id, name string) {
c.idToName[strings.ToLower(id)] = name
c.nameToID[strings.ToLower(name)] = id
} }
// IDOf returns the id associated with the given name. // IDOf returns the id associated with the given name.

View File

@ -0,0 +1,60 @@
package idname
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/tester"
)
type IDNameUnitSuite struct {
tester.Suite
}
func TestIDNameUnitSuite(t *testing.T) {
suite.Run(t, &IDNameUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *IDNameUnitSuite) TestAdd() {
table := []struct {
name string
inID string
inName string
searchID string
searchName string
}{
{
name: "basic",
inID: "foo",
inName: "bar",
searchID: "foo",
searchName: "bar",
},
{
name: "change casing",
inID: "FNORDS",
inName: "SMARF",
searchID: "fnords",
searchName: "smarf",
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
cache := NewCache(nil)
cache.Add(test.inID, test.inName)
id, found := cache.IDOf(test.searchName)
assert.True(t, found)
assert.Equal(t, test.inID, id)
name, found := cache.NameOf(test.searchID)
assert.True(t, found)
assert.Equal(t, test.inName, name)
})
}
}

View File

@ -78,6 +78,10 @@ func (u SafeURL) Format(fs fmt.State, _ rune) {
fmt.Fprint(fs, u.Conceal()) fmt.Fprint(fs, u.Conceal())
} }
func (u SafeURL) PlainString() string {
return u.URL
}
// String complies with Stringer to ensure the Conceal() version // String complies with Stringer to ensure the Conceal() version
// of the url is printed anytime it gets transformed to a string. // of the url is printed anytime it gets transformed to a string.
func (u SafeURL) String() string { func (u SafeURL) String() string {
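PlainString is the explicit escape hatch: Format and String keep concealing, so the raw value appears only when a caller asks for it by name. An in-package sketch; the URL literal is illustrative, and the struct literal assumes URL is the only field that needs setting:

func ExampleSafeURL_PlainString() {
	// Hypothetical value, not from this repository.
	u := SafeURL{URL: "https://example.com/cal?secret=abc"}

	fmt.Println(u)               // concealed: Format and String both redact
	fmt.Println(u.PlainString()) // raw URL, only on explicit request
}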

View File

@ -91,6 +91,11 @@ func (c NoFetchRestoreCollection) FetchItemByName(context.Context, string) (Stre
return nil, ErrNotFound return nil, ErrNotFound
} }
type FetchRestoreCollection struct {
Collection
FetchItemByNamer
}
// Stream represents a single item within a Collection // Stream represents a single item within a Collection
// that can be consumed as a stream (it embeds io.Reader) // that can be consumed as a stream (it embeds io.Reader)
type Stream interface { type Stream interface {
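FetchRestoreCollection is the composition counterpart to NoFetchRestoreCollection: it assembles a RestoreCollection from any Collection plus any FetchItemByNamer, so the two halves can come from different implementations. A hedged sketch using the mock types added later in this diff, assuming RestoreCollection is exactly the union of those two interfaces and guessing the mock import path:

package sketch

import (
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/data/mock"
)

// Hypothetical composition; mock.Collection happens to satisfy both
// embedded interfaces here, but the two halves could differ.
var _ data.RestoreCollection = data.FetchRestoreCollection{
	Collection:       mock.Collection{},
	FetchItemByNamer: mock.Collection{},
}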

View File

@ -1,12 +1,24 @@
package mock package mock
import ( import (
"context"
"io" "io"
"time" "time"
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
) )
// ---------------------------------------------------------------------------
// stream
// ---------------------------------------------------------------------------
var _ data.Stream = &Stream{}
type Stream struct { type Stream struct {
ID string ID string
Reader io.ReadCloser Reader io.ReadCloser
@ -52,3 +64,39 @@ type errReader struct {
func (er errReader) Read([]byte) (int, error) { func (er errReader) Read([]byte) (int, error) {
return 0, er.readErr return 0, er.readErr
} }
// ---------------------------------------------------------------------------
// collection
// ---------------------------------------------------------------------------
var (
_ data.Collection = &Collection{}
_ data.BackupCollection = &Collection{}
_ data.RestoreCollection = &Collection{}
)
type Collection struct{}
func (c Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
return nil
}
func (c Collection) FullPath() path.Path {
return nil
}
func (c Collection) PreviousPath() path.Path {
return nil
}
func (c Collection) State() data.CollectionState {
return data.NewState
}
func (c Collection) DoNotMergeItems() bool {
return true
}
func (c Collection) FetchItemByName(ctx context.Context, name string) (data.Stream, error) {
return &Stream{}, clues.New("not implemented")
}

View File

@ -35,6 +35,8 @@ const (
BackupEnd = "Backup End" BackupEnd = "Backup End"
RestoreStart = "Restore Start" RestoreStart = "Restore Start"
RestoreEnd = "Restore End" RestoreEnd = "Restore End"
ExportStart = "Export Start"
ExportEnd = "Export End"
MaintenanceStart = "Maintenance Start" MaintenanceStart = "Maintenance Start"
MaintenanceEnd = "Maintenance End" MaintenanceEnd = "Maintenance End"
@ -49,6 +51,7 @@ const (
ItemsWritten = "items_written" ItemsWritten = "items_written"
Resources = "resources" Resources = "resources"
RestoreID = "restore_id" RestoreID = "restore_id"
ExportID = "export_id"
Service = "service" Service = "service"
StartTime = "start_time" StartTime = "start_time"
Status = "status" Status = "status"
@ -82,8 +85,8 @@ var (
RudderStackDataPlaneURL string RudderStackDataPlaneURL string
) )
func NewBus(ctx context.Context, s storage.Storage, tenID string, opts control.Options) (Bus, error) { func NewBus(ctx context.Context, s storage.Storage, tenID string, co control.Options) (Bus, error) {
if opts.DisableMetrics { if co.DisableMetrics {
return Bus{}, nil return Bus{}, nil
} }

View File

@ -52,7 +52,7 @@ func (suite *EventsIntegrationSuite) TestNewBus() {
) )
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
b, err := events.NewBus(ctx, s, a.ID(), control.Defaults()) b, err := events.NewBus(ctx, s, a.ID(), control.DefaultOptions())
require.NotEmpty(t, b) require.NotEmpty(t, b)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))

View File

@ -24,7 +24,7 @@ type BackupBases interface {
MergeBackupBases( MergeBackupBases(
ctx context.Context, ctx context.Context,
other BackupBases, other BackupBases,
reasonToKey func(Reason) string, reasonToKey func(Reasoner) string,
) BackupBases ) BackupBases
} }
@ -109,10 +109,10 @@ func (bb *backupBases) ClearAssistBases() {
// some migration that disrupts lookup), and that the BackupBases used to call // some migration that disrupts lookup), and that the BackupBases used to call
// this function contains the current version. // this function contains the current version.
// //
// reasonToKey should be a function that, given a Reason, will produce some // reasonToKey should be a function that, given a Reasoner, will produce some
// string that represents Reason in the context of the merge operation. For // string that represents Reasoner in the context of the merge operation. For
// example, to merge BackupBases across a ResourceOwner migration, the Reason's // example, to merge BackupBases across a ProtectedResource migration, the
// service and category can be used as the key. // Reasoner's service and category can be used as the key.
// //
// Selection priority, for each reason key generated by reasonsToKey, follows // Selection priority, for each reason key generated by reasonsToKey, follows
// these rules: // these rules:
@ -125,7 +125,7 @@ func (bb *backupBases) ClearAssistBases() {
func (bb *backupBases) MergeBackupBases( func (bb *backupBases) MergeBackupBases(
ctx context.Context, ctx context.Context,
other BackupBases, other BackupBases,
reasonToKey func(reason Reason) string, reasonToKey func(reason Reasoner) string,
) BackupBases { ) BackupBases {
if other == nil || (len(other.MergeBases()) == 0 && len(other.AssistBases()) == 0) { if other == nil || (len(other.MergeBases()) == 0 && len(other.AssistBases()) == 0) {
return bb return bb
@ -159,7 +159,7 @@ func (bb *backupBases) MergeBackupBases(
// Calculate the set of mergeBases to pull from other into this one. // Calculate the set of mergeBases to pull from other into this one.
for _, m := range other.MergeBases() { for _, m := range other.MergeBases() {
useReasons := []Reason{} useReasons := []Reasoner{}
for _, r := range m.Reasons { for _, r := range m.Reasons {
k := reasonToKey(r) k := reasonToKey(r)
@ -210,7 +210,7 @@ func (bb *backupBases) MergeBackupBases(
// Add assistBases from other to this one as needed. // Add assistBases from other to this one as needed.
for _, m := range other.AssistBases() { for _, m := range other.AssistBases() {
useReasons := []Reason{} useReasons := []Reasoner{}
// Assume that all complete manifests in assist overlap with MergeBases. // Assume that all complete manifests in assist overlap with MergeBases.
if len(m.IncompleteReason) == 0 { if len(m.IncompleteReason) == 0 {
@ -267,8 +267,8 @@ func findNonUniqueManifests(
} }
for _, reason := range man.Reasons { for _, reason := range man.Reasons {
reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String() mapKey := reasonKey(reason)
reasons[reasonKey] = append(reasons[reasonKey], man) reasons[mapKey] = append(reasons[mapKey], man)
} }
} }
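The reasonToKey shape described in the doc comment above is easiest to see as code; the unit tests that follow use exactly this form. A minimal in-package sketch for merging across a protected-resource migration, where two Reasoners should collide whenever service and category agree:

// serviceCatKey ignores the protected resource, so bases from the old
// and new resource IDs merge when service and category match.
func serviceCatKey(r Reasoner) string {
	return r.Service().String() + r.Category().String()
}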

View File

@ -16,7 +16,7 @@ import (
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
) )
func makeManifest(id, incmpl, bID string, reasons ...Reason) ManifestEntry { func makeManifest(id, incmpl, bID string, reasons ...Reasoner) ManifestEntry {
bIDKey, _ := makeTagKV(TagBackupID) bIDKey, _ := makeTagKV(TagBackupID)
return ManifestEntry{ return ManifestEntry{
@ -223,14 +223,10 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
ir = "checkpoint" ir = "checkpoint"
} }
reasons := make([]Reason, 0, len(i.cat)) reasons := make([]Reasoner, 0, len(i.cat))
for _, c := range i.cat { for _, c := range i.cat {
reasons = append(reasons, Reason{ reasons = append(reasons, NewReason("", ro, path.ExchangeService, c))
ResourceOwner: ro,
Service: path.ExchangeService,
Category: c,
})
} }
m := makeManifest(baseID, ir, "b"+baseID, reasons...) m := makeManifest(baseID, ir, "b"+baseID, reasons...)
@ -457,8 +453,8 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
got := bb.MergeBackupBases( got := bb.MergeBackupBases(
ctx, ctx,
other, other,
func(reason Reason) string { func(r Reasoner) string {
return reason.Service.String() + reason.Category.String() return r.Service().String() + r.Category().String()
}) })
AssertBackupBasesEqual(t, expect, got) AssertBackupBasesEqual(t, expect, got)
}) })
@ -469,13 +465,8 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
ro := "resource_owner" ro := "resource_owner"
makeMan := func(pct path.CategoryType, id, incmpl, bID string) ManifestEntry { makeMan := func(pct path.CategoryType, id, incmpl, bID string) ManifestEntry {
reason := Reason{ r := NewReason("", ro, path.ExchangeService, pct)
ResourceOwner: ro, return makeManifest(id, incmpl, bID, r)
Service: path.ExchangeService,
Category: pct,
}
return makeManifest(id, incmpl, bID, reason)
} }
// Make a function so tests can modify things without messing with each other. // Make a function so tests can modify things without messing with each other.
@ -606,11 +597,7 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
res := validMail1() res := validMail1()
res.mergeBases[0].Reasons = append( res.mergeBases[0].Reasons = append(
res.mergeBases[0].Reasons, res.mergeBases[0].Reasons,
Reason{ NewReason("", ro, path.ExchangeService, path.ContactsCategory))
ResourceOwner: ro,
Service: path.ExchangeService,
Category: path.ContactsCategory,
})
res.assistBases = res.mergeBases res.assistBases = res.mergeBases
return res return res
@ -619,11 +606,7 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
res := validMail1() res := validMail1()
res.mergeBases[0].Reasons = append( res.mergeBases[0].Reasons = append(
res.mergeBases[0].Reasons, res.mergeBases[0].Reasons,
Reason{ NewReason("", ro, path.ExchangeService, path.ContactsCategory))
ResourceOwner: ro,
Service: path.ExchangeService,
Category: path.ContactsCategory,
})
res.assistBases = res.mergeBases res.assistBases = res.mergeBases
return res return res


@ -29,39 +29,94 @@ const (
userTagPrefix = "tag:" userTagPrefix = "tag:"
) )
type Reason struct { // TODO(ashmrtn): Move this into some inject package. Here to avoid import
ResourceOwner string // cycles.
Service path.ServiceType type Reasoner interface {
Category path.CategoryType Tenant() string
ProtectedResource() string
Service() path.ServiceType
Category() path.CategoryType
// SubtreePath returns the path prefix for data in existing backups that have
// parameters (tenant, protected resource, etc.) that match this Reasoner.
SubtreePath() (path.Path, error)
} }
func (r Reason) TagKeys() []string { func NewReason(
return []string{ tenant, resource string,
r.ResourceOwner, service path.ServiceType,
serviceCatString(r.Service, r.Category), category path.CategoryType,
) Reasoner {
return reason{
tenant: tenant,
resource: resource,
service: service,
category: category,
} }
} }
// Key is the concatenation of the ResourceOwner, Service, and Category. type reason struct {
func (r Reason) Key() string { // tenant appears here so that when this is moved to an inject package nothing
return r.ResourceOwner + r.Service.String() + r.Category.String() // needs to change. However, kopia itself is blind to the fields in the reason
// struct and relies on helper functions to get the information it needs.
tenant string
resource string
service path.ServiceType
category path.CategoryType
}
func (r reason) Tenant() string {
return r.tenant
}
func (r reason) ProtectedResource() string {
return r.resource
}
func (r reason) Service() path.ServiceType {
return r.service
}
func (r reason) Category() path.CategoryType {
return r.category
}
func (r reason) SubtreePath() (path.Path, error) {
p, err := path.ServicePrefix(
r.Tenant(),
r.ProtectedResource(),
r.Service(),
r.Category())
return p, clues.Wrap(err, "building path").OrNil()
}
func tagKeys(r Reasoner) []string {
return []string{
r.ProtectedResource(),
serviceCatString(r.Service(), r.Category()),
}
}
// reasonKey returns the concatenation of the ProtectedResource, Service, and Category.
func reasonKey(r Reasoner) string {
return r.ProtectedResource() + r.Service().String() + r.Category().String()
} }
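
For reference, a rough sketch of how these helpers compose; the tenant and resource IDs are placeholders and the error handling is abbreviated:

    // Placeholder tenant and resource values.
    r := NewReason("tenant-id", "user-id", path.ExchangeService, path.EmailCategory)

    // Path prefix for this tenant/resource/service/category in existing backups.
    stp, err := r.SubtreePath()
    if err != nil {
        return clues.Wrap(err, "building subtree path")
    }

    // Tag keys used to filter candidate manifests, as in getBase below.
    allTags := map[string]string{}
    for _, k := range tagKeys(r) {
        allTags[k] = ""
    }

    // Map key grouping manifests that share a reason.
    key := reasonKey(r)
    _, _ = stp, key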
type BackupEntry struct { type BackupEntry struct {
*backup.Backup *backup.Backup
Reasons []Reason Reasons []Reasoner
} }
type ManifestEntry struct { type ManifestEntry struct {
*snapshot.Manifest *snapshot.Manifest
// Reason contains the ResourceOwners and Service/Categories that caused this // Reasons contains the ResourceOwners and Service/Categories that caused this
// snapshot to be selected as a base. We can't reuse OwnersCats here because // snapshot to be selected as a base. We can't reuse OwnersCats here because
// it's possible some ResourceOwners will have a subset of the Categories as // it's possible some ResourceOwners will have a subset of the Categories as
// the reason for selecting a snapshot. For example: // the reason for selecting a snapshot. For example:
// 1. backup user1 email,contacts -> B1 // 1. backup user1 email,contacts -> B1
// 2. backup user1 contacts -> B2 (uses B1 as base) // 2. backup user1 contacts -> B2 (uses B1 as base)
// 3. backup user1 email,contacts,events (uses B1 for email, B2 for contacts) // 3. backup user1 email,contacts,events (uses B1 for email, B2 for contacts)
Reasons []Reason Reasons []Reasoner
} }
func (me ManifestEntry) GetTag(key string) (string, bool) { func (me ManifestEntry) GetTag(key string) (string, bool) {
@ -157,7 +212,7 @@ func (b *baseFinder) getBackupModel(
// most recent complete backup as the base. // most recent complete backup as the base.
func (b *baseFinder) findBasesInSet( func (b *baseFinder) findBasesInSet(
ctx context.Context, ctx context.Context,
reason Reason, reason Reasoner,
metas []*manifest.EntryMetadata, metas []*manifest.EntryMetadata,
) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) { ) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
// Sort manifests by time so we can go through them sequentially. The code in // Sort manifests by time so we can go through them sequentially. The code in
@ -190,7 +245,7 @@ func (b *baseFinder) findBasesInSet(
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{ kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
Manifest: man, Manifest: man,
Reasons: []Reason{reason}, Reasons: []Reasoner{reason},
}) })
logger.Ctx(ictx).Info("found incomplete backup") logger.Ctx(ictx).Info("found incomplete backup")
@ -211,7 +266,7 @@ func (b *baseFinder) findBasesInSet(
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{ kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
Manifest: man, Manifest: man,
Reasons: []Reason{reason}, Reasons: []Reasoner{reason},
}) })
logger.Ctx(ictx).Info("found incomplete backup") logger.Ctx(ictx).Info("found incomplete backup")
@ -235,7 +290,7 @@ func (b *baseFinder) findBasesInSet(
kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{ kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
Manifest: man, Manifest: man,
Reasons: []Reason{reason}, Reasons: []Reasoner{reason},
}) })
logger.Ctx(ictx).Infow( logger.Ctx(ictx).Infow(
@ -253,13 +308,13 @@ func (b *baseFinder) findBasesInSet(
me := ManifestEntry{ me := ManifestEntry{
Manifest: man, Manifest: man,
Reasons: []Reason{reason}, Reasons: []Reasoner{reason},
} }
kopiaAssistSnaps = append(kopiaAssistSnaps, me) kopiaAssistSnaps = append(kopiaAssistSnaps, me)
return &BackupEntry{ return &BackupEntry{
Backup: bup, Backup: bup,
Reasons: []Reason{reason}, Reasons: []Reasoner{reason},
}, &me, kopiaAssistSnaps, nil }, &me, kopiaAssistSnaps, nil
} }
@ -270,12 +325,12 @@ func (b *baseFinder) findBasesInSet(
func (b *baseFinder) getBase( func (b *baseFinder) getBase(
ctx context.Context, ctx context.Context,
reason Reason, r Reasoner,
tags map[string]string, tags map[string]string,
) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) { ) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
allTags := map[string]string{} allTags := map[string]string{}
for _, k := range reason.TagKeys() { for _, k := range tagKeys(r) {
allTags[k] = "" allTags[k] = ""
} }
@ -292,12 +347,12 @@ func (b *baseFinder) getBase(
return nil, nil, nil, nil return nil, nil, nil, nil
} }
return b.findBasesInSet(ctx, reason, metas) return b.findBasesInSet(ctx, r, metas)
} }
func (b *baseFinder) FindBases( func (b *baseFinder) FindBases(
ctx context.Context, ctx context.Context,
reasons []Reason, reasons []Reasoner,
tags map[string]string, tags map[string]string,
) BackupBases { ) BackupBases {
var ( var (
@ -310,14 +365,14 @@ func (b *baseFinder) FindBases(
kopiaAssistSnaps = map[manifest.ID]ManifestEntry{} kopiaAssistSnaps = map[manifest.ID]ManifestEntry{}
) )
for _, reason := range reasons { for _, searchReason := range reasons {
ictx := clues.Add( ictx := clues.Add(
ctx, ctx,
"search_service", reason.Service.String(), "search_service", searchReason.Service().String(),
"search_category", reason.Category.String()) "search_category", searchReason.Category().String())
logger.Ctx(ictx).Info("searching for previous manifests") logger.Ctx(ictx).Info("searching for previous manifests")
baseBackup, baseSnap, assistSnaps, err := b.getBase(ictx, reason, tags) baseBackup, baseSnap, assistSnaps, err := b.getBase(ictx, searchReason, tags)
if err != nil { if err != nil {
logger.Ctx(ctx).Info( logger.Ctx(ctx).Info(
"getting base, falling back to full backup for reason", "getting base, falling back to full backup for reason",


@ -39,61 +39,24 @@ var (
testUser2 = "user2" testUser2 = "user2"
testUser3 = "user3" testUser3 = "user3"
testAllUsersAllCats = []Reason{ testAllUsersAllCats = []Reasoner{
{ // User1 email and events.
ResourceOwner: testUser1, NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
Service: path.ExchangeService, NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
Category: path.EmailCategory, // User2 email and events.
}, NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
{ NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
ResourceOwner: testUser1, // User3 email and events.
Service: path.ExchangeService, NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
Category: path.EventsCategory, NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
},
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
} }
testAllUsersMail = []Reason{ testAllUsersMail = []Reasoner{
{ NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
ResourceOwner: testUser1, NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
Service: path.ExchangeService, NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
Category: path.EmailCategory,
},
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
} }
testUser1Mail = []Reason{ testUser1Mail = []Reasoner{
{ NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
ResourceOwner: testUser1,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
} }
) )
@ -322,12 +285,8 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() {
sm: mockEmptySnapshotManager{}, sm: mockEmptySnapshotManager{},
bg: mockEmptyModelGetter{}, bg: mockEmptyModelGetter{},
} }
reasons := []Reason{ reasons := []Reasoner{
{ NewReason("", "a-user", path.ExchangeService, path.EmailCategory),
ResourceOwner: "a-user",
Service: path.ExchangeService,
Category: path.EmailCategory,
},
} }
bb := bf.FindBases(ctx, reasons, nil) bb := bf.FindBases(ctx, reasons, nil)
@ -345,12 +304,8 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
sm: &mockSnapshotManager{findErr: assert.AnError}, sm: &mockSnapshotManager{findErr: assert.AnError},
bg: mockEmptyModelGetter{}, bg: mockEmptyModelGetter{},
} }
reasons := []Reason{ reasons := []Reasoner{
{ NewReason("", "a-user", path.ExchangeService, path.EmailCategory),
ResourceOwner: "a-user",
Service: path.ExchangeService,
Category: path.EmailCategory,
},
} }
bb := bf.FindBases(ctx, reasons, nil) bb := bf.FindBases(ctx, reasons, nil)
@ -361,14 +316,14 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
func (suite *BaseFinderUnitSuite) TestGetBases() { func (suite *BaseFinderUnitSuite) TestGetBases() {
table := []struct { table := []struct {
name string name string
input []Reason input []Reasoner
manifestData []manifestInfo manifestData []manifestInfo
// Use this to denote the Reasons a base backup or base manifest is // Use this to denote the Reasons a base backup or base manifest is
// selected. The int maps to the index of the backup or manifest in data. // selected. The int maps to the index of the backup or manifest in data.
expectedBaseReasons map[int][]Reason expectedBaseReasons map[int][]Reasoner
// Use this to denote the Reasons a kopia assisted incrementals manifest is // Use this to denote the Reasons a kopia assisted incrementals manifest is
// selected. The int maps to the index of the manifest in data. // selected. The int maps to the index of the manifest in data.
expectedAssistManifestReasons map[int][]Reason expectedAssistManifestReasons map[int][]Reasoner
backupData []backupInfo backupData []backupInfo
}{ }{
{ {
@ -394,10 +349,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
1: testUser1Mail, 1: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
1: testUser1Mail, 1: testUser1Mail,
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -428,10 +383,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
1: testUser1Mail, 1: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
1: testUser1Mail, 1: testUser1Mail,
}, },
@ -463,10 +418,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
1: testUser1Mail, 1: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
1: testUser1Mail, 1: testUser1Mail,
}, },
@ -492,10 +447,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser3, testUser3,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -519,10 +474,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser3, testUser3,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
0: testAllUsersAllCats, 0: testAllUsersAllCats,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: testAllUsersAllCats, 0: testAllUsersAllCats,
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -557,76 +512,28 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser3, testUser3,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
0: { 0: {
{ NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
ResourceOwner: testUser1, NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
Service: path.ExchangeService, NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
Category: path.EmailCategory,
},
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
}, },
1: { 1: {
Reason{ NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
ResourceOwner: testUser1, NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
Service: path.ExchangeService, NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
Category: path.EventsCategory,
},
Reason{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
Reason{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
}, },
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: { 0: {
{ NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
ResourceOwner: testUser1, NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
Service: path.ExchangeService, NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
Category: path.EmailCategory,
},
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
}, },
1: { 1: {
Reason{ NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
ResourceOwner: testUser1, NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
Service: path.ExchangeService, NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
Category: path.EventsCategory,
},
Reason{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
Reason{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
}, },
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -657,10 +564,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
1: testUser1Mail, 1: testUser1Mail,
}, },
@ -693,10 +600,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
1: testUser1Mail, 1: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
1: testUser1Mail, 1: testUser1Mail,
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -728,8 +635,8 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{}, expectedBaseReasons: map[int][]Reasoner{},
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
1: testUser1Mail, 1: testUser1Mail,
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -752,10 +659,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -787,10 +694,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
testUser1, testUser1,
), ),
}, },
expectedBaseReasons: map[int][]Reason{ expectedBaseReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
expectedAssistManifestReasons: map[int][]Reason{ expectedAssistManifestReasons: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
backupData: []backupInfo{ backupData: []backupInfo{
@ -857,17 +764,17 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {
table := []struct { table := []struct {
name string name string
input []Reason input []Reasoner
tags map[string]string tags map[string]string
// Use this to denote which manifests in data should be expected. Allows // Use this to denote which manifests in data should be expected. Allows
// defining data in a table while not repeating things between data and // defining data in a table while not repeating things between data and
// expected. // expected.
expectedIdxs map[int][]Reason expectedIdxs map[int][]Reasoner
}{ }{
{ {
name: "no tags specified", name: "no tags specified",
tags: nil, tags: nil,
expectedIdxs: map[int][]Reason{ expectedIdxs: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
}, },
@ -877,14 +784,14 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {
"fnords": "", "fnords": "",
"smarf": "", "smarf": "",
}, },
expectedIdxs: map[int][]Reason{ expectedIdxs: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
}, },
{ {
name: "subset of custom tags", name: "subset of custom tags",
tags: map[string]string{"fnords": ""}, tags: map[string]string{"fnords": ""},
expectedIdxs: map[int][]Reason{ expectedIdxs: map[int][]Reasoner{
0: testUser1Mail, 0: testUser1Mail,
}, },
}, },
@ -925,7 +832,7 @@ func checkManifestEntriesMatch(
t *testing.T, t *testing.T,
retSnaps []ManifestEntry, retSnaps []ManifestEntry,
allExpected []manifestInfo, allExpected []manifestInfo,
expectedIdxsAndReasons map[int][]Reason, expectedIdxsAndReasons map[int][]Reasoner,
) { ) {
// Check the proper snapshot manifests were returned. // Check the proper snapshot manifests were returned.
expected := make([]*snapshot.Manifest, 0, len(expectedIdxsAndReasons)) expected := make([]*snapshot.Manifest, 0, len(expectedIdxsAndReasons))
@ -941,7 +848,7 @@ func checkManifestEntriesMatch(
assert.ElementsMatch(t, expected, got) assert.ElementsMatch(t, expected, got)
// Check the reasons for selecting each manifest are correct. // Check the reasons for selecting each manifest are correct.
expectedReasons := make(map[manifest.ID][]Reason, len(expectedIdxsAndReasons)) expectedReasons := make(map[manifest.ID][]Reasoner, len(expectedIdxsAndReasons))
for idx, reasons := range expectedIdxsAndReasons { for idx, reasons := range expectedIdxsAndReasons {
expectedReasons[allExpected[idx].man.ID] = reasons expectedReasons[allExpected[idx].man.ID] = reasons
} }
@ -967,7 +874,7 @@ func checkBackupEntriesMatch(
t *testing.T, t *testing.T,
retBups []BackupEntry, retBups []BackupEntry,
allExpected []backupInfo, allExpected []backupInfo,
expectedIdxsAndReasons map[int][]Reason, expectedIdxsAndReasons map[int][]Reasoner,
) { ) {
// Check the proper snapshot manifests were returned. // Check the proper snapshot manifests were returned.
expected := make([]*backup.Backup, 0, len(expectedIdxsAndReasons)) expected := make([]*backup.Backup, 0, len(expectedIdxsAndReasons))
@ -983,7 +890,7 @@ func checkBackupEntriesMatch(
assert.ElementsMatch(t, expected, got) assert.ElementsMatch(t, expected, got)
// Check the reasons for selecting each manifest are correct. // Check the reasons for selecting each manifest are correct.
expectedReasons := make(map[model.StableID][]Reason, len(expectedIdxsAndReasons)) expectedReasons := make(map[model.StableID][]Reasoner, len(expectedIdxsAndReasons))
for idx, reasons := range expectedIdxsAndReasons { for idx, reasons := range expectedIdxsAndReasons {
expectedReasons[allExpected[idx].b.ID] = reasons expectedReasons[allExpected[idx].b.ID] = reasons
} }


@ -12,12 +12,16 @@ import (
"github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/compression" "github.com/kopia/kopia/repo/compression"
"github.com/kopia/kopia/repo/content" "github.com/kopia/kopia/repo/content"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/maintenance"
"github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/policy" "github.com/kopia/kopia/snapshot/policy"
"github.com/kopia/kopia/snapshot/snapshotfs" "github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/kopia/retention"
"github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/storage" "github.com/alcionai/corso/src/pkg/storage"
) )
@ -70,7 +74,11 @@ func NewConn(s storage.Storage) *conn {
} }
} }
func (w *conn) Initialize(ctx context.Context, opts repository.Options) error { func (w *conn) Initialize(
ctx context.Context,
opts repository.Options,
retentionOpts repository.Retention,
) error {
bst, err := blobStoreByProvider(ctx, opts, w.storage) bst, err := blobStoreByProvider(ctx, opts, w.storage)
if err != nil { if err != nil {
return clues.Wrap(err, "initializing storage") return clues.Wrap(err, "initializing storage")
@ -82,8 +90,23 @@ func (w *conn) Initialize(ctx context.Context, opts repository.Options) error {
return clues.Stack(err).WithClues(ctx) return clues.Stack(err).WithClues(ctx)
} }
// todo - issue #75: nil here should be a storage.NewRepoOptions() rOpts := retention.NewOpts()
if err = repo.Initialize(ctx, bst, nil, cfg.CorsoPassphrase); err != nil { if err := rOpts.Set(retentionOpts); err != nil {
return clues.Wrap(err, "setting retention configuration").WithClues(ctx)
}
blobCfg, _, err := rOpts.AsConfigs(ctx)
if err != nil {
return clues.Stack(err)
}
// Minimal config for retention if caller requested it.
kopiaOpts := repo.NewRepositoryOptions{
RetentionMode: blobCfg.RetentionMode,
RetentionPeriod: blobCfg.RetentionPeriod,
}
if err = repo.Initialize(ctx, bst, &kopiaOpts, cfg.CorsoPassphrase); err != nil {
if errors.Is(err, repo.ErrAlreadyInitialized) { if errors.Is(err, repo.ErrAlreadyInitialized) {
return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx) return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
} }
@ -107,7 +130,10 @@ func (w *conn) Initialize(ctx context.Context, opts repository.Options) error {
return clues.Stack(err).WithClues(ctx) return clues.Stack(err).WithClues(ctx)
} }
return nil // Calling with all parameters here will set the extend-object-locks flag
// for maintenance. Parameters for actual retention should have been set during
// initialization and won't be updated again.
return clues.Stack(w.setRetentionParameters(ctx, retentionOpts)).OrNil()
} }
func (w *conn) Connect(ctx context.Context, opts repository.Options) error { func (w *conn) Connect(ctx context.Context, opts repository.Options) error {
@ -326,12 +352,12 @@ func updateCompressionOnPolicy(compressor string, p *policy.Policy) (bool, error
return true, nil return true, nil
} }
func updateRetentionOnPolicy(retention policy.RetentionPolicy, p *policy.Policy) bool { func updateRetentionOnPolicy(retPolicy policy.RetentionPolicy, p *policy.Policy) bool {
if retention == p.RetentionPolicy { if retPolicy == p.RetentionPolicy {
return false return false
} }
p.RetentionPolicy = retention p.RetentionPolicy = retPolicy
return true return true
} }
@ -410,6 +436,118 @@ func checkCompressor(compressor compression.Name) error {
return clues.Stack(clues.New("unknown compressor type"), clues.New(string(compressor))) return clues.Stack(clues.New("unknown compressor type"), clues.New(string(compressor)))
} }
func (w *conn) setRetentionParameters(
ctx context.Context,
rrOpts repository.Retention,
) error {
if rrOpts.Mode == nil && rrOpts.Duration == nil && rrOpts.Extend == nil {
return nil
}
// Somewhat confusing case: when retention is disabled but a non-zero duration
// is given, kopia acts as if only the duration was passed and errors about
// having to set both. Return a clearer error here instead.
if ptr.Val(rrOpts.Mode) == repository.NoRetention && ptr.Val(rrOpts.Duration) != 0 {
return clues.New("duration must be 0 if retention is disabled").WithClues(ctx)
}
dr, ok := w.Repository.(repo.DirectRepository)
if !ok {
return clues.New("getting handle to repo").WithClues(ctx)
}
blobCfg, params, err := getRetentionConfigs(ctx, dr)
if err != nil {
return clues.Stack(err)
}
opts := retention.OptsFromConfigs(*blobCfg, *params)
if err := opts.Set(rrOpts); err != nil {
return clues.Stack(err).WithClues(ctx)
}
return clues.Stack(persistRetentionConfigs(ctx, dr, opts)).OrNil()
}
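
As a usage sketch (the conn variable k and the context are illustrative; the option values mirror the retention integration test later in this diff), a caller opting into retention at initialization time looks roughly like:

    err := k.Initialize(
        ctx,
        repository.Options{},
        repository.Retention{
            Mode:     ptr.To(repository.GovernanceRetention),
            Duration: ptr.To(48 * time.Hour),
            Extend:   ptr.To(true),
        })
    if err != nil {
        return clues.Wrap(err, "initializing repo with retention")
    }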
func getRetentionConfigs(
ctx context.Context,
dr repo.DirectRepository,
) (*format.BlobStorageConfiguration, *maintenance.Params, error) {
blobCfg, err := dr.FormatManager().BlobCfgBlob()
if err != nil {
return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx)
}
params, err := maintenance.GetParams(ctx, dr)
if err != nil {
return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx)
}
return &blobCfg, params, nil
}
func persistRetentionConfigs(
ctx context.Context,
dr repo.DirectRepository,
opts *retention.Opts,
) error {
// Persist changes.
if !opts.BlobChanged() && !opts.ParamsChanged() {
return nil
}
blobCfg, params, err := opts.AsConfigs(ctx)
if err != nil {
return clues.Stack(err)
}
mp, err := dr.FormatManager().GetMutableParameters()
if err != nil {
return clues.Wrap(err, "getting mutable parameters").WithClues(ctx)
}
requiredFeatures, err := dr.FormatManager().RequiredFeatures()
if err != nil {
return clues.Wrap(err, "getting required features").WithClues(ctx)
}
// Must be the case that only blob changed.
if !opts.ParamsChanged() {
return clues.Wrap(
dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
"persisting storage config",
).WithClues(ctx).OrNil()
}
// Both blob and maintenance changed. A DirectWriteSession is required to
// update the maintenance config but not the blob config.
err = repo.DirectWriteSession(
ctx,
dr,
repo.WriteSessionOptions{
Purpose: "Corso immutable backups config",
},
func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
// Set the maintenance config first so we can still bail out of the write
// session later.
if err := maintenance.SetParams(ctx, dw, &params); err != nil {
return clues.Wrap(err, "maintenance config").
WithClues(ctx)
}
if !opts.BlobChanged() {
return nil
}
return clues.Wrap(
dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
"storage config",
).WithClues(ctx).OrNil()
})
return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil()
}
func (w *conn) LoadSnapshot( func (w *conn) LoadSnapshot(
ctx context.Context, ctx context.Context,
id manifest.ID, id manifest.ID,


@ -7,12 +7,15 @@ import (
"time" "time"
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/policy" "github.com/kopia/kopia/snapshot/policy"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/storage" "github.com/alcionai/corso/src/pkg/storage"
@ -26,7 +29,7 @@ func openKopiaRepo(
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewPrefixedS3Storage(t)
k := NewConn(st) k := NewConn(st)
if err := k.Initialize(ctx, repository.Options{}); err != nil { if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil {
return nil, err return nil, err
} }
@ -82,13 +85,13 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewPrefixedS3Storage(t)
k := NewConn(st) k := NewConn(st)
err := k.Initialize(ctx, repository.Options{}) err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
err = k.Close(ctx) err = k.Close(ctx)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
err = k.Initialize(ctx, repository.Options{}) err = k.Initialize(ctx, repository.Options{}, repository.Retention{})
assert.Error(t, err, clues.ToCore(err)) assert.Error(t, err, clues.ToCore(err))
assert.ErrorIs(t, err, ErrorRepoAlreadyExists) assert.ErrorIs(t, err, ErrorRepoAlreadyExists)
} }
@ -103,7 +106,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {
st.Provider = storage.ProviderUnknown st.Provider = storage.ProviderUnknown
k := NewConn(st) k := NewConn(st)
err := k.Initialize(ctx, repository.Options{}) err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
assert.Error(t, err, clues.ToCore(err)) assert.Error(t, err, clues.ToCore(err))
} }
@ -413,7 +416,7 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewPrefixedS3Storage(t)
k := NewConn(st) k := NewConn(st)
err := k.Initialize(ctx, opts) err := k.Initialize(ctx, opts, repository.Retention{})
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
kopiaOpts := k.ClientOptions() kopiaOpts := k.ClientOptions()
@ -453,3 +456,72 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
err = k.Close(ctx) err = k.Close(ctx)
assert.NoError(t, err, clues.ToCore(err)) assert.NoError(t, err, clues.ToCore(err))
} }
// ---------------
// integration tests that require object locking to be enabled on the bucket.
// ---------------
type ConnRetentionIntegrationSuite struct {
tester.Suite
}
func TestConnRetentionIntegrationSuite(t *testing.T) {
suite.Run(t, &ConnRetentionIntegrationSuite{
Suite: tester.NewRetentionSuite(
t,
[][]string{storeTD.AWSStorageCredEnvs},
),
})
}
// Test that providing retention changes nothing besides the retention values
// relative to the defaults that kopia uses.
func (suite *ConnRetentionIntegrationSuite) TestInitWithAndWithoutRetention() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
st1 := storeTD.NewPrefixedS3Storage(t)
k1 := NewConn(st1)
err := k1.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, "initializing repo 1: %v", clues.ToCore(err))
st2 := storeTD.NewPrefixedS3Storage(t)
k2 := NewConn(st2)
err = k2.Initialize(
ctx,
repository.Options{},
repository.Retention{
Mode: ptr.To(repository.GovernanceRetention),
Duration: ptr.To(time.Hour * 48),
Extend: ptr.To(true),
})
require.NoError(t, err, "initializing repo 2: %v", clues.ToCore(err))
dr1, ok := k1.Repository.(repo.DirectRepository)
require.True(t, ok, "getting direct repo 1")
dr2, ok := k2.Repository.(repo.DirectRepository)
require.True(t, ok, "getting direct repo 2")
format1 := dr1.FormatManager().ScrubbedContentFormat()
format2 := dr2.FormatManager().ScrubbedContentFormat()
assert.Equal(t, format1, format2)
blobCfg1, err := dr1.FormatManager().BlobCfgBlob()
require.NoError(t, err, "getting blob config 1: %v", clues.ToCore(err))
blobCfg2, err := dr2.FormatManager().BlobCfgBlob()
require.NoError(t, err, "getting retention config 2: %v", clues.ToCore(err))
assert.NotEqual(t, blobCfg1, blobCfg2)
// Check to make sure retention not enabled unexpectedly.
checkRetentionParams(t, ctx, k1, blob.RetentionMode(""), 0, assert.False)
// Some checks to make sure retention was fully initialized as expected.
checkRetentionParams(t, ctx, k2, blob.Governance, time.Hour*48, assert.True)
}


@ -15,7 +15,8 @@ type (
BackupConsumer interface { BackupConsumer interface {
ConsumeBackupCollections( ConsumeBackupCollections(
ctx context.Context, ctx context.Context,
bases []kopia.IncrementalBase, backupReasons []kopia.Reasoner,
bases kopia.BackupBases,
cs []data.BackupCollection, cs []data.BackupCollection,
pmr prefixmatcher.StringSetReader, pmr prefixmatcher.StringSetReader,
tags map[string]string, tags map[string]string,
@ -37,7 +38,7 @@ type (
BaseFinder interface { BaseFinder interface {
FindBases( FindBases(
ctx context.Context, ctx context.Context,
reasons []kopia.Reason, reasons []kopia.Reasoner,
tags map[string]string, tags map[string]string,
) kopia.BackupBases ) kopia.BackupBases
} }


@ -70,7 +70,9 @@ func (mc *mergeCollection) Items(
for _, c := range mc.cols { for _, c := range mc.cols {
// Unfortunately doesn't seem to be a way right now to see if the // Unfortunately doesn't seem to be a way right now to see if the
// iteration failed and we should be exiting early. // iteration failed and we should be exiting early.
ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath) ictx := clues.Add(
ctx,
"merged_collection_storage_path", path.LoggableDir(c.storagePath))
logger.Ctx(ictx).Debug("sending items from merged collection") logger.Ctx(ictx).Debug("sending items from merged collection")
for item := range c.Items(ictx, errs) { for item := range c.Items(ictx, errs) {
@ -95,7 +97,9 @@ func (mc *mergeCollection) FetchItemByName(
"merged_collection_count", len(mc.cols)) "merged_collection_count", len(mc.cols))
for _, c := range mc.cols { for _, c := range mc.cols {
ictx := clues.Add(ctx, "merged_collection_storage_path", c.storagePath) ictx := clues.Add(
ctx,
"merged_collection_storage_path", path.LoggableDir(c.storagePath))
logger.Ctx(ictx).Debug("looking for item in merged collection") logger.Ctx(ictx).Debug("looking for item in merged collection")


@ -808,7 +808,7 @@ func openConnAndModelStore(
st := storeTD.NewPrefixedS3Storage(t) st := storeTD.NewPrefixedS3Storage(t)
c := NewConn(st) c := NewConn(st)
err := c.Initialize(ctx, repository.Options{}) err := c.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
defer func() { defer func() {


@ -0,0 +1,139 @@
package retention
import (
"context"
"time"
"github.com/alcionai/clues"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/maintenance"
"github.com/alcionai/corso/src/pkg/control/repository"
)
type Opts struct {
blobCfg format.BlobStorageConfiguration
params maintenance.Params
blobChanged bool
paramsChanged bool
}
func NewOpts() *Opts {
return &Opts{}
}
func OptsFromConfigs(
blobCfg format.BlobStorageConfiguration,
params maintenance.Params,
) *Opts {
return &Opts{
blobCfg: blobCfg,
params: params,
}
}
func (r *Opts) AsConfigs(
ctx context.Context,
) (format.BlobStorageConfiguration, maintenance.Params, error) {
// Check the new config is valid.
if r.blobCfg.IsRetentionEnabled() {
if err := maintenance.CheckExtendRetention(ctx, r.blobCfg, &r.params); err != nil {
return format.BlobStorageConfiguration{}, maintenance.Params{}, clues.Wrap(
err,
"invalid retention config",
).WithClues(ctx)
}
}
return r.blobCfg, r.params, nil
}
func (r *Opts) BlobChanged() bool {
return r.blobChanged
}
func (r *Opts) ParamsChanged() bool {
return r.paramsChanged
}
func (r *Opts) Set(opts repository.Retention) error {
r.setMaintenanceParams(opts.Extend)
return clues.Wrap(
r.setBlobConfigParams(opts.Mode, opts.Duration),
"setting mode or duration",
).OrNil()
}
func (r *Opts) setMaintenanceParams(extend *bool) {
if extend != nil && r.params.ExtendObjectLocks != *extend {
r.params.ExtendObjectLocks = *extend
r.paramsChanged = true
}
}
func (r *Opts) setBlobConfigParams(
mode *repository.RetentionMode,
duration *time.Duration,
) error {
err := r.setBlobConfigMode(mode)
if err != nil {
return clues.Stack(err)
}
r.setBlobConfigDuration(duration)
return nil
}
func (r *Opts) setBlobConfigDuration(duration *time.Duration) {
if duration != nil && r.blobCfg.RetentionPeriod != *duration {
r.blobCfg.RetentionPeriod = *duration
r.blobChanged = true
}
}
func (r *Opts) setBlobConfigMode(
mode *repository.RetentionMode,
) error {
if mode == nil {
return nil
}
startMode := r.blobCfg.RetentionMode
switch *mode {
case repository.NoRetention:
if !r.blobCfg.IsRetentionEnabled() {
return nil
}
r.blobCfg.RetentionMode = ""
r.blobCfg.RetentionPeriod = 0
case repository.GovernanceRetention:
r.blobCfg.RetentionMode = blob.Governance
case repository.ComplianceRetention:
r.blobCfg.RetentionMode = blob.Compliance
default:
return clues.New("unknown retention mode").
With("provided_retention_mode", mode.String())
}
// Only check if the retention mode is not empty. IsValid errors out if it's
// empty.
if len(r.blobCfg.RetentionMode) > 0 && !r.blobCfg.RetentionMode.IsValid() {
return clues.New("invalid retention mode").
With("retention_mode", r.blobCfg.RetentionMode)
}
// Take into account previous operations on r that could have already updated
// blobChanged.
r.blobChanged = r.blobChanged || startMode != r.blobCfg.RetentionMode
return nil
}
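
To make the flow concrete, a small sketch of the set-then-extract round trip that conn.Initialize performs with this package; all names come from the API above, and persistence itself is elided:

    rOpts := retention.NewOpts()
    if err := rOpts.Set(repository.Retention{
        Mode:     ptr.To(repository.GovernanceRetention),
        Duration: ptr.To(48 * time.Hour),
    }); err != nil {
        return clues.Wrap(err, "setting retention configuration")
    }

    // Only persist when something actually changed.
    if rOpts.BlobChanged() || rOpts.ParamsChanged() {
        blobCfg, params, err := rOpts.AsConfigs(ctx)
        if err != nil {
            return clues.Stack(err)
        }
        // blobCfg/params are then handed to kopia's format manager and
        // maintenance APIs by the caller.
        _, _ = blobCfg, params
    }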


@ -0,0 +1,204 @@
package retention_test
import (
"testing"
"time"
"github.com/alcionai/clues"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/maintenance"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/kopia/retention"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control/repository"
)
type OptsUnitSuite struct {
tester.Suite
}
func TestOptsUnitSuite(t *testing.T) {
suite.Run(t, &OptsUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *OptsUnitSuite) TestOptsFromConfigs() {
var (
t = suite.T()
mode = blob.Governance
duration = time.Hour * 48
extend = true
blobCfgInput = format.BlobStorageConfiguration{
RetentionMode: mode,
RetentionPeriod: duration,
}
paramsInput = maintenance.Params{ExtendObjectLocks: extend}
)
ctx, flush := tester.NewContext(t)
defer flush()
opts := retention.OptsFromConfigs(blobCfgInput, paramsInput)
assert.False(t, opts.BlobChanged(), "BlobChanged")
assert.False(t, opts.ParamsChanged(), "ParamsChanged")
blobCfg, params, err := opts.AsConfigs(ctx)
require.NoError(t, err, "AsConfigs: %v", clues.ToCore(err))
assert.Equal(t, blobCfgInput, blobCfg)
assert.Equal(t, paramsInput, params)
}
func (suite *OptsUnitSuite) TestSet() {
var (
kopiaMode = blob.Governance
mode = repository.GovernanceRetention
duration = time.Hour * 48
)
table := []struct {
name string
inputBlob format.BlobStorageConfiguration
inputParams maintenance.Params
ctrlOpts repository.Retention
setErr require.ErrorAssertionFunc
expectMode blob.RetentionMode
expectDuration time.Duration
expectExtend bool
expectBlobChanged bool
expectParamsChanged bool
}{
{
name: "All Nils",
setErr: require.NoError,
},
{
name: "All Off",
ctrlOpts: repository.Retention{
Mode: ptr.To(repository.NoRetention),
Duration: ptr.To(time.Duration(0)),
Extend: ptr.To(false),
},
setErr: require.NoError,
},
{
name: "UnknownRetention",
ctrlOpts: repository.Retention{
Mode: ptr.To(repository.UnknownRetention),
Duration: ptr.To(duration),
},
setErr: require.Error,
},
{
name: "Invalid Retention Mode",
ctrlOpts: repository.Retention{
Mode: ptr.To(repository.RetentionMode(-1)),
Duration: ptr.To(duration),
},
setErr: require.Error,
},
{
name: "Valid Set All",
ctrlOpts: repository.Retention{
Mode: ptr.To(mode),
Duration: ptr.To(duration),
Extend: ptr.To(true),
},
setErr: require.NoError,
expectMode: kopiaMode,
expectDuration: duration,
expectExtend: true,
expectBlobChanged: true,
expectParamsChanged: true,
},
{
name: "Valid Set BlobConfig",
ctrlOpts: repository.Retention{
Mode: ptr.To(mode),
Duration: ptr.To(duration),
},
setErr: require.NoError,
expectMode: kopiaMode,
expectDuration: duration,
expectBlobChanged: true,
},
{
name: "Valid Set Params",
ctrlOpts: repository.Retention{
Extend: ptr.To(true),
},
setErr: require.NoError,
expectExtend: true,
expectParamsChanged: true,
},
{
name: "Partial BlobConfig Change",
inputBlob: format.BlobStorageConfiguration{
RetentionMode: kopiaMode,
RetentionPeriod: duration,
},
ctrlOpts: repository.Retention{
Duration: ptr.To(duration + time.Hour),
},
setErr: require.NoError,
expectMode: kopiaMode,
expectDuration: duration + time.Hour,
expectBlobChanged: true,
},
{
name: "No BlobConfig Change",
inputBlob: format.BlobStorageConfiguration{
RetentionMode: kopiaMode,
RetentionPeriod: duration,
},
ctrlOpts: repository.Retention{
Mode: ptr.To(mode),
Duration: ptr.To(duration),
},
setErr: require.NoError,
expectMode: kopiaMode,
expectDuration: duration,
},
{
name: "No Params Change",
inputParams: maintenance.Params{ExtendObjectLocks: true},
ctrlOpts: repository.Retention{
Extend: ptr.To(true),
},
setErr: require.NoError,
expectExtend: true,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
opts := retention.OptsFromConfigs(test.inputBlob, test.inputParams)
err := opts.Set(test.ctrlOpts)
test.setErr(t, err, "setting params: %v", clues.ToCore(err))
if err != nil {
return
}
blobCfg, params, err := opts.AsConfigs(ctx)
require.NoError(t, err, "getting configs: %v", clues.ToCore(err))
assert.Equal(t, test.expectMode, blobCfg.RetentionMode, "mode")
assert.Equal(t, test.expectDuration, blobCfg.RetentionPeriod, "duration")
assert.Equal(t, test.expectExtend, params.ExtendObjectLocks, "extend locks")
assert.Equal(t, test.expectBlobChanged, opts.BlobChanged(), "blob changed")
assert.Equal(t, test.expectParamsChanged, opts.ParamsChanged(), "params changed")
})
}
}


@ -20,6 +20,7 @@ import (
"github.com/kopia/kopia/fs/virtualfs" "github.com/kopia/kopia/fs/virtualfs"
"github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot/snapshotfs" "github.com/kopia/kopia/snapshot/snapshotfs"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/common/prefixmatcher"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
@ -27,6 +28,7 @@ import (
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/graph/metadata" "github.com/alcionai/corso/src/internal/m365/graph/metadata"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
@ -250,7 +252,9 @@ func (cp *corsoProgress) FinishedHashingFile(fname string, bs int64) {
sl[i] = string(rdt) sl[i] = string(rdt)
} }
logger.Ctx(context.Background()).Debugw("finished hashing file", "path", sl[2:]) logger.Ctx(cp.ctx).Debugw(
"finished hashing file",
"path", clues.Hide(path.Elements(sl[2:])))
atomic.AddInt64(&cp.totalBytes, bs) atomic.AddInt64(&cp.totalBytes, bs)
} }
@ -440,12 +444,12 @@ func streamBaseEntries(
ctx = clues.Add( ctx = clues.Add(
ctx, ctx,
"current_item_path", curPath, "current_directory_path", curPath,
"longest_prefix", longest) "longest_prefix", path.LoggableDir(longest))
err := dir.IterateEntries(ctx, func(innerCtx context.Context, entry fs.Entry) error { err := dir.IterateEntries(ctx, func(innerCtx context.Context, entry fs.Entry) error {
if err := innerCtx.Err(); err != nil { if err := innerCtx.Err(); err != nil {
return err return clues.Stack(err).WithClues(ctx)
} }
// Don't walk subdirectories in this function. // Don't walk subdirectories in this function.
@ -462,7 +466,9 @@ func streamBaseEntries(
entName, err := decodeElement(entry.Name()) entName, err := decodeElement(entry.Name())
if err != nil { if err != nil {
return clues.Wrap(err, "decoding entry name: "+entry.Name()) return clues.Wrap(err, "decoding entry name").
WithClues(ctx).
With("entry_name", entry.Name())
} }
// This entry was marked as deleted by a service that can't tell us the // This entry was marked as deleted by a service that can't tell us the
@ -474,7 +480,7 @@ func streamBaseEntries(
// For now assuming that item IDs don't need escaping. // For now assuming that item IDs don't need escaping.
itemPath, err := curPath.AppendItem(entName) itemPath, err := curPath.AppendItem(entName)
if err != nil { if err != nil {
return clues.Wrap(err, "getting full item path for base entry") return clues.Wrap(err, "getting full item path for base entry").WithClues(ctx)
} }
// We need the previous path so we can find this item in the base snapshot's // We need the previous path so we can find this item in the base snapshot's
@ -483,7 +489,7 @@ func streamBaseEntries(
// to look for. // to look for.
prevItemPath, err := prevPath.AppendItem(entName) prevItemPath, err := prevPath.AppendItem(entName)
if err != nil { if err != nil {
return clues.Wrap(err, "getting previous full item path for base entry") return clues.Wrap(err, "getting previous full item path for base entry").WithClues(ctx)
} }
// Meta files aren't in backup details since it's the set of items the user // Meta files aren't in backup details since it's the set of items the user
@ -507,13 +513,15 @@ func streamBaseEntries(
} }
if err := ctr(ctx, entry); err != nil { if err := ctr(ctx, entry); err != nil {
return clues.Wrap(err, "executing callback on item").With("item_path", itemPath) return clues.Wrap(err, "executing callback on item").
WithClues(ctx).
With("item_path", itemPath)
} }
return nil return nil
}) })
if err != nil { if err != nil {
return clues.Wrap(err, "traversing items in base snapshot directory") return clues.Wrap(err, "traversing items in base snapshot directory").WithClues(ctx)
} }
return nil return nil
@ -824,7 +832,9 @@ func inflateCollectionTree(
} }
if node.collection != nil && node.collection.State() == data.NotMovedState { if node.collection != nil && node.collection.State() == data.NotMovedState {
return nil, nil, clues.New("conflicting states for collection").With("changed_path", p) return nil, nil, clues.New("conflicting states for collection").
WithClues(ctx).
With("changed_path", p)
} }
} }
@ -851,13 +861,14 @@ func traverseBaseDir(
expectedDirPath *path.Builder, expectedDirPath *path.Builder,
dir fs.Directory, dir fs.Directory,
roots map[string]*treeMap, roots map[string]*treeMap,
stats *count.Bus,
) error { ) error {
ctx = clues.Add(ctx, ctx = clues.Add(ctx,
"old_dir_path", oldDirPath, "old_dir_path", oldDirPath,
"expected_dir_path", expectedDirPath) "expected_dir_path", expectedDirPath)
if depth >= maxInflateTraversalDepth { if depth >= maxInflateTraversalDepth {
return clues.New("base snapshot tree too tall") return clues.New("base snapshot tree too tall").WithClues(ctx)
} }
// Wrapper base64 encodes all file and folder names to avoid issues with // Wrapper base64 encodes all file and folder names to avoid issues with
@ -865,7 +876,9 @@ func traverseBaseDir(
// from kopia we need to do the decoding here. // from kopia we need to do the decoding here.
dirName, err := decodeElement(dir.Name()) dirName, err := decodeElement(dir.Name())
if err != nil { if err != nil {
return clues.Wrap(err, "decoding base directory name").With("dir_name", dir.Name()) return clues.Wrap(err, "decoding base directory name").
WithClues(ctx).
With("dir_name", clues.Hide(dir.Name()))
} }
// Form the path this directory would be at if the hierarchy remained the same // Form the path this directory would be at if the hierarchy remained the same
@ -884,14 +897,29 @@ func traverseBaseDir(
currentPath = currentPath.Append(dirName) currentPath = currentPath.Append(dirName)
} }
var explicitMention bool
if upb, ok := updatedPaths[oldDirPath.String()]; ok { if upb, ok := updatedPaths[oldDirPath.String()]; ok {
// This directory was deleted. // This directory was deleted.
if upb == nil { if upb == nil {
currentPath = nil currentPath = nil
stats.Inc(statDel)
} else { } else {
// This directory was moved/renamed and the new location is in upb. // This directory was explicitly mentioned and the new (possibly
// unchanged) location is in upb.
currentPath = upb.ToBuilder() currentPath = upb.ToBuilder()
// Below we check if the collection was marked as new or DoNotMerge which
// disables merging behavior. That means we can't directly update stats
// here, else we'll miss delta token refreshes and the like. Instead, note
// that we did see the path explicitly so it's not counted as a recursive
// operation.
explicitMention = true
} }
} else if currentPath == nil {
// Just stats tracking stuff.
stats.Inc(statRecursiveDel)
} }
ctx = clues.Add(ctx, "new_path", currentPath) ctx = clues.Add(ctx, "new_path", currentPath)
@ -919,10 +947,11 @@ func traverseBaseDir(
oldDirPath, oldDirPath,
currentPath, currentPath,
dEntry, dEntry,
roots) roots,
stats)
}) })
if err != nil { if err != nil {
return clues.Wrap(err, "traversing base directory") return clues.Wrap(err, "traversing base directory").WithClues(ctx)
} }
// We only need to add this base directory to the tree we're building if it // We only need to add this base directory to the tree we're building if it
@ -939,7 +968,7 @@ func traverseBaseDir(
// in the if-block though as that is an optimization. // in the if-block though as that is an optimization.
node := getTreeNode(roots, currentPath.Elements()) node := getTreeNode(roots, currentPath.Elements())
if node == nil { if node == nil {
return clues.New("getting tree node") return clues.New("getting tree node").WithClues(ctx)
} }
// Now that we have the node we need to check if there is a collection // Now that we have the node we need to check if there is a collection
@ -949,17 +978,28 @@ func traverseBaseDir(
// directories. The expected usecase for this is delta token expiry in M365. // directories. The expected usecase for this is delta token expiry in M365.
if node.collection != nil && if node.collection != nil &&
(node.collection.DoNotMergeItems() || node.collection.State() == data.NewState) { (node.collection.DoNotMergeItems() || node.collection.State() == data.NewState) {
stats.Inc(statSkipMerge)
return nil return nil
} }
// Just stats tracking stuff.
if oldDirPath.String() == currentPath.String() {
stats.Inc(statNoMove)
} else if explicitMention {
stats.Inc(statMove)
} else {
stats.Inc(statRecursiveMove)
}
curP, err := path.FromDataLayerPath(currentPath.String(), false) curP, err := path.FromDataLayerPath(currentPath.String(), false)
if err != nil { if err != nil {
return clues.New("converting current path to path.Path") return clues.New("converting current path to path.Path").WithClues(ctx)
} }
oldP, err := path.FromDataLayerPath(oldDirPath.String(), false) oldP, err := path.FromDataLayerPath(oldDirPath.String(), false)
if err != nil { if err != nil {
return clues.New("converting old path to path.Path") return clues.New("converting old path to path.Path").WithClues(ctx)
} }
node.baseDir = dir node.baseDir = dir
@ -970,10 +1010,50 @@ func traverseBaseDir(
return nil return nil
} }
func logBaseInfo(ctx context.Context, m ManifestEntry) {
svcs := map[string]struct{}{}
cats := map[string]struct{}{}
for _, r := range m.Reasons {
svcs[r.Service().String()] = struct{}{}
cats[r.Category().String()] = struct{}{}
}
mbID, _ := m.GetTag(TagBackupID)
if len(mbID) == 0 {
mbID = "no_backup_id_tag"
}
logger.Ctx(ctx).Infow(
"using base for backup",
"base_snapshot_id", m.ID,
"services", maps.Keys(svcs),
"categories", maps.Keys(cats),
"base_backup_id", mbID)
}
const (
// statNoMove denotes a directory that wasn't moved at all.
statNoMove = "directories_not_moved"
// statMove denotes a directory that was explicitly moved.
statMove = "directories_explicitly_moved"
// statRecursiveMove denotes a directory that moved because one or more of
// its ancestors moved and it wasn't explicitly mentioned.
statRecursiveMove = "directories_recursively_moved"
// statDel denotes a directory that was explicitly deleted.
statDel = "directories_explicitly_deleted"
// statRecursiveDel denotes a directory that was deleted because one or more
// of its ancestors was deleted and it wasn't explicitly mentioned.
statRecursiveDel = "directories_recursively_deleted"
// statSkipMerge denotes the number of directories that weren't merged because
// they were marked either DoNotMerge or New.
statSkipMerge = "directories_skipped_merging"
)
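
A tiny sketch of how these counters are exercised during traversal, using the count.Bus calls that appear in this diff:

    stats := count.New()
    stats.Inc(statDel)            // bumped when a directory is explicitly deleted
    deleted := stats.Get(statDel) // read back for the merge-stats log line
    _ = deleted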
func inflateBaseTree( func inflateBaseTree(
ctx context.Context, ctx context.Context,
loader snapshotLoader, loader snapshotLoader,
snap IncrementalBase, snap ManifestEntry,
updatedPaths map[string]path.Path, updatedPaths map[string]path.Path,
roots map[string]*treeMap, roots map[string]*treeMap,
) error { ) error {
@ -996,13 +1076,25 @@ func inflateBaseTree(
return clues.New("snapshot root is not a directory").WithClues(ctx) return clues.New("snapshot root is not a directory").WithClues(ctx)
} }
// Some logging to help track things.
logBaseInfo(ctx, snap)
// For each subtree corresponding to the tuple // For each subtree corresponding to the tuple
// (resource owner, service, category) merge the directories in the base with // (resource owner, service, category) merge the directories in the base with
// what has been reported in the collections we got. // what has been reported in the collections we got.
for _, subtreePath := range snap.SubtreePaths { for _, r := range snap.Reasons {
ictx := clues.Add(
ctx,
"subtree_service", r.Service().String(),
"subtree_category", r.Category().String())
subtreePath, err := r.SubtreePath()
if err != nil {
return clues.Wrap(err, "building subtree path").WithClues(ictx)
}
// We're starting from the root directory so don't need it in the path. // We're starting from the root directory so don't need it in the path.
pathElems := encodeElements(subtreePath.PopFront().Elements()...) pathElems := encodeElements(subtreePath.PopFront().Elements()...)
ictx := clues.Add(ctx, "subtree_path", subtreePath)
ent, err := snapshotfs.GetNestedEntry(ictx, dir, pathElems) ent, err := snapshotfs.GetNestedEntry(ictx, dir, pathElems)
if err != nil { if err != nil {
@ -1022,22 +1114,35 @@ func inflateBaseTree(
// This ensures that a migration on the directory prefix can complete. // This ensures that a migration on the directory prefix can complete.
// The prefix is the tenant/service/owner/category set, which remains // The prefix is the tenant/service/owner/category set, which remains
// otherwise unchecked in tree inflation below this point. // otherwise unchecked in tree inflation below this point.
newSubtreePath := subtreePath newSubtreePath := subtreePath.ToBuilder()
if p, ok := updatedPaths[subtreePath.String()]; ok { if p, ok := updatedPaths[subtreePath.String()]; ok {
newSubtreePath = p.ToBuilder() newSubtreePath = p.ToBuilder()
} }
stats := count.New()
if err = traverseBaseDir( if err = traverseBaseDir(
ictx, ictx,
0, 0,
updatedPaths, updatedPaths,
subtreePath.Dir(), subtreePath.ToBuilder().Dir(),
newSubtreePath.Dir(), newSubtreePath.Dir(),
subtreeDir, subtreeDir,
roots, roots,
stats,
); err != nil { ); err != nil {
return clues.Wrap(err, "traversing base snapshot").WithClues(ictx) return clues.Wrap(err, "traversing base snapshot").WithClues(ictx)
} }
logger.Ctx(ctx).Infow(
"merge subtree stats",
statNoMove, stats.Get(statNoMove),
statMove, stats.Get(statMove),
statRecursiveMove, stats.Get(statRecursiveMove),
statDel, stats.Get(statDel),
statRecursiveDel, stats.Get(statRecursiveDel),
statSkipMerge, stats.Get(statSkipMerge))
} }
return nil return nil
@ -1059,7 +1164,7 @@ func inflateBaseTree(
func inflateDirTree( func inflateDirTree(
ctx context.Context, ctx context.Context,
loader snapshotLoader, loader snapshotLoader,
baseSnaps []IncrementalBase, baseSnaps []ManifestEntry,
collections []data.BackupCollection, collections []data.BackupCollection,
globalExcludeSet prefixmatcher.StringSetReader, globalExcludeSet prefixmatcher.StringSetReader,
progress *corsoProgress, progress *corsoProgress,
@ -1089,7 +1194,7 @@ func inflateDirTree(
} }
if len(roots) > 1 { if len(roots) > 1 {
return nil, clues.New("multiple root directories") return nil, clues.New("multiple root directories").WithClues(ctx)
} }
var res fs.Directory var res fs.Directory

View File

@ -946,21 +946,22 @@ func (msw *mockSnapshotWalker) SnapshotRoot(*snapshot.Manifest) (fs.Entry, error
return msw.snapshotRoot, nil return msw.snapshotRoot, nil
} }
func mockIncrementalBase( func makeManifestEntry(
id, tenant, resourceOwner string, id, tenant, resourceOwner string,
service path.ServiceType, service path.ServiceType,
categories ...path.CategoryType, categories ...path.CategoryType,
) IncrementalBase { ) ManifestEntry {
stps := []*path.Builder{} var reasons []Reasoner
for _, c := range categories { for _, c := range categories {
stps = append(stps, path.Builder{}.Append(tenant, service.String(), resourceOwner, c.String())) reasons = append(reasons, NewReason(tenant, resourceOwner, service, c))
} }
return IncrementalBase{ return ManifestEntry{
Manifest: &snapshot.Manifest{ Manifest: &snapshot.Manifest{
ID: manifest.ID(id), ID: manifest.ID(id),
}, },
SubtreePaths: stps, Reasons: reasons,
} }
} }
@ -1331,8 +1332,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
dirTree, err := inflateDirTree( dirTree, err := inflateDirTree(
ctx, ctx,
msw, msw,
[]IncrementalBase{ []ManifestEntry{
mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
}, },
test.inputCollections(), test.inputCollections(),
pmMock.NewPrefixMap(nil), pmMock.NewPrefixMap(nil),
@ -2260,8 +2261,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
dirTree, err := inflateDirTree( dirTree, err := inflateDirTree(
ctx, ctx,
msw, msw,
[]IncrementalBase{ []ManifestEntry{
mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
}, },
test.inputCollections(t), test.inputCollections(t),
ie, ie,
@ -2425,8 +2426,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
dirTree, err := inflateDirTree( dirTree, err := inflateDirTree(
ctx, ctx,
msw, msw,
[]IncrementalBase{ []ManifestEntry{
mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
}, },
collections, collections,
pmMock.NewPrefixMap(nil), pmMock.NewPrefixMap(nil),
@ -2531,8 +2532,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase()
dirTree, err := inflateDirTree( dirTree, err := inflateDirTree(
ctx, ctx,
msw, msw,
[]IncrementalBase{ []ManifestEntry{
mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory), makeManifestEntry("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
}, },
collections, collections,
pmMock.NewPrefixMap(nil), pmMock.NewPrefixMap(nil),
@ -2782,9 +2783,9 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
dirTree, err := inflateDirTree( dirTree, err := inflateDirTree(
ctx, ctx,
msw, msw,
[]IncrementalBase{ []ManifestEntry{
mockIncrementalBase("id1", testTenant, testUser, path.ExchangeService, path.ContactsCategory), makeManifestEntry("id1", testTenant, testUser, path.ExchangeService, path.ContactsCategory),
mockIncrementalBase("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory), makeManifestEntry("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory),
}, },
collections, collections,
pmMock.NewPrefixMap(nil), pmMock.NewPrefixMap(nil),
@ -2948,8 +2949,8 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
dirTree, err := inflateDirTree( dirTree, err := inflateDirTree(
ctx, ctx,
msw, msw,
[]IncrementalBase{ []ManifestEntry{
mockIncrementalBase("id1", testTenant, testUser, path.ExchangeService, path.EmailCategory, path.ContactsCategory), makeManifestEntry("id1", testTenant, testUser, path.ExchangeService, path.EmailCategory, path.ContactsCategory),
}, },
[]data.BackupCollection{mce, mcc}, []data.BackupCollection{mce, mcc},
pmMock.NewPrefixMap(nil), pmMock.NewPrefixMap(nil),

View File

@ -4,22 +4,19 @@ import (
"context" "context"
"errors" "errors"
"strings" "strings"
"time"
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/kopia/kopia/fs" "github.com/kopia/kopia/fs"
"github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/maintenance" "github.com/kopia/kopia/repo/maintenance"
"github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/policy" "github.com/kopia/kopia/snapshot/policy"
"github.com/kopia/kopia/snapshot/snapshotfs" "github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/kopia/kopia/snapshot/snapshotmaintenance" "github.com/kopia/kopia/snapshot/snapshotmaintenance"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/common/prefixmatcher"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/diagnostics"
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/observe"
@ -132,11 +129,6 @@ func (w *Wrapper) Close(ctx context.Context) error {
return nil return nil
} }
type IncrementalBase struct {
*snapshot.Manifest
SubtreePaths []*path.Builder
}
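
IncrementalBase is dropped in favor of ManifestEntry, whose subtree paths are derived from its Reasons rather than stored directly. A minimal sketch of the replacement shape, mirroring the makeManifestEntry test helper earlier in this diff (NewReason and the field names come from the diff; the helper itself is illustrative):

package kopia

import (
	"github.com/kopia/kopia/repo/manifest"
	"github.com/kopia/kopia/snapshot"

	"github.com/alcionai/corso/src/pkg/path"
)

// emailBase builds a ManifestEntry covering a single (tenant, user,
// service, category) reason; inflateBaseTree later recovers the subtree
// path from each Reasoner via SubtreePath().
func emailBase(id, tenant, user string) ManifestEntry {
	return ManifestEntry{
		Manifest: &snapshot.Manifest{ID: manifest.ID(id)},
		Reasons: []Reasoner{
			NewReason(tenant, user, path.ExchangeService, path.EmailCategory),
		},
	}
}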
// ConsumeBackupCollections takes a set of collections and creates a kopia snapshot // ConsumeBackupCollections takes a set of collections and creates a kopia snapshot
// with the data that they contain. previousSnapshots is used for incremental // with the data that they contain. previousSnapshots is used for incremental
// backups and should represent the base snapshot from which metadata is sourced // backups and should represent the base snapshot from which metadata is sourced
@ -145,10 +137,11 @@ type IncrementalBase struct {
// complete backup of all data. // complete backup of all data.
func (w Wrapper) ConsumeBackupCollections( func (w Wrapper) ConsumeBackupCollections(
ctx context.Context, ctx context.Context,
previousSnapshots []IncrementalBase, backupReasons []Reasoner,
bases BackupBases,
collections []data.BackupCollection, collections []data.BackupCollection,
globalExcludeSet prefixmatcher.StringSetReader, globalExcludeSet prefixmatcher.StringSetReader,
tags map[string]string, additionalTags map[string]string,
buildTreeWithBase bool, buildTreeWithBase bool,
errs *fault.Bus, errs *fault.Bus,
) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) { ) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
@ -174,15 +167,23 @@ func (w Wrapper) ConsumeBackupCollections(
// When running an incremental backup, we need to pass the prior // When running an incremental backup, we need to pass the prior
// snapshot bases into inflateDirTree so that the new snapshot // snapshot bases into inflateDirTree so that the new snapshot
// includes historical data. // includes historical data.
var base []IncrementalBase var (
if buildTreeWithBase { mergeBase []ManifestEntry
base = previousSnapshots assistBase []ManifestEntry
)
if bases != nil {
if buildTreeWithBase {
mergeBase = bases.MergeBases()
}
assistBase = bases.AssistBases()
} }
dirTree, err := inflateDirTree( dirTree, err := inflateDirTree(
ctx, ctx,
w.c, w.c,
base, mergeBase,
collections, collections,
globalExcludeSet, globalExcludeSet,
progress) progress)
@ -190,9 +191,22 @@ func (w Wrapper) ConsumeBackupCollections(
return nil, nil, nil, clues.Wrap(err, "building kopia directories") return nil, nil, nil, clues.Wrap(err, "building kopia directories")
} }
// Add some extra tags so we can look things up by reason.
tags := maps.Clone(additionalTags)
if tags == nil {
// maps.Clone returns nil when its input map is nil.
tags = map[string]string{}
}
for _, r := range backupReasons {
for _, k := range tagKeys(r) {
tags[k] = ""
}
}
s, err := w.makeSnapshotWithRoot( s, err := w.makeSnapshotWithRoot(
ctx, ctx,
previousSnapshots, assistBase,
dirTree, dirTree,
tags, tags,
progress) progress)
@ -205,7 +219,7 @@ func (w Wrapper) ConsumeBackupCollections(
func (w Wrapper) makeSnapshotWithRoot( func (w Wrapper) makeSnapshotWithRoot(
ctx context.Context, ctx context.Context,
prevSnapEntries []IncrementalBase, prevSnapEntries []ManifestEntry,
root fs.Directory, root fs.Directory,
addlTags map[string]string, addlTags map[string]string,
progress *corsoProgress, progress *corsoProgress,
@ -225,8 +239,8 @@ func (w Wrapper) makeSnapshotWithRoot(
ctx = clues.Add( ctx = clues.Add(
ctx, ctx,
"len_prev_base_snapshots", len(prevSnapEntries), "num_assist_snapshots", len(prevSnapEntries),
"assist_snap_ids", snapIDs, "assist_snapshot_ids", snapIDs,
"additional_tags", addlTags) "additional_tags", addlTags)
if len(snapIDs) > 0 { if len(snapIDs) > 0 {
@ -310,7 +324,7 @@ func (w Wrapper) makeSnapshotWithRoot(
// Telling kopia to always flush may hide other errors if it fails while // Telling kopia to always flush may hide other errors if it fails while
// flushing the write session (hence logging above). // flushing the write session (hence logging above).
if err != nil { if err != nil {
return nil, clues.Wrap(err, "kopia backup") return nil, clues.Wrap(err, "kopia backup").WithClues(ctx)
} }
res := manifestToStats(man, progress, bc) res := manifestToStats(man, progress, bc)
@ -355,7 +369,7 @@ func getDir(
encodeElements(dirPath.PopFront().Elements()...)) encodeElements(dirPath.PopFront().Elements()...))
if err != nil { if err != nil {
if isErrEntryNotFound(err) { if isErrEntryNotFound(err) {
err = clues.Stack(data.ErrNotFound, err) err = clues.Stack(data.ErrNotFound, err).WithClues(ctx)
} }
return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx) return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx)
@ -473,7 +487,7 @@ func (w Wrapper) ProduceRestoreCollections(
// load it here. // load it here.
snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID) snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "loading snapshot root") return nil, clues.Wrap(err, "loading snapshot root").WithClues(ctx)
} }
var ( var (
@ -493,8 +507,8 @@ func (w Wrapper) ProduceRestoreCollections(
// items from a single directory instance lower down. // items from a single directory instance lower down.
ictx := clues.Add( ictx := clues.Add(
ctx, ctx,
"item_path", itemPaths.StoragePath.String(), "item_path", itemPaths.StoragePath,
"restore_path", itemPaths.RestorePath.String()) "restore_path", itemPaths.RestorePath)
parentStoragePath, err := itemPaths.StoragePath.Dir() parentStoragePath, err := itemPaths.StoragePath.Dir()
if err != nil { if err != nil {
@ -538,7 +552,7 @@ func (w Wrapper) ProduceRestoreCollections(
// then load the items from the directory. // then load the items from the directory.
res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs) res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "loading items") return nil, clues.Wrap(err, "loading items").WithClues(ctx)
} }
return res, el.Failure() return res, el.Failure()
@ -596,12 +610,12 @@ func (w Wrapper) RepoMaintenance(
) error { ) error {
kopiaSafety, err := translateSafety(opts.Safety) kopiaSafety, err := translateSafety(opts.Safety)
if err != nil { if err != nil {
return clues.Wrap(err, "identifying safety level") return clues.Wrap(err, "identifying safety level").WithClues(ctx)
} }
mode, err := translateMode(opts.Type) mode, err := translateMode(opts.Type)
if err != nil { if err != nil {
return clues.Wrap(err, "identifying maintenance mode") return clues.Wrap(err, "identifying maintenance mode").WithClues(ctx)
} }
currentOwner := w.c.ClientOptions().UsernameAtHost() currentOwner := w.c.ClientOptions().UsernameAtHost()
@ -722,202 +736,5 @@ func (w *Wrapper) SetRetentionParameters(
ctx context.Context, ctx context.Context,
retention repository.Retention, retention repository.Retention,
) error { ) error {
if retention.Mode == nil && retention.Duration == nil && retention.Extend == nil { return clues.Stack(w.c.setRetentionParameters(ctx, retention)).OrNil()
return nil
}
// Somewhat confusing case: when we have no retention but a non-zero
// duration, it acts as if we passed in only the duration and returns an
// error about having to set both. Return a clearer error here instead.
// Check whether mode is set so we still allow changing duration if mode
// is already set.
if m, ok := ptr.ValOK(retention.Mode); ok && m == repository.NoRetention && ptr.Val(retention.Duration) != 0 {
return clues.New("duration must be 0 if retention is disabled").WithClues(ctx)
}
dr, ok := w.c.Repository.(repo.DirectRepository)
if !ok {
return clues.New("getting handle to repo").WithClues(ctx)
}
blobCfg, params, err := getRetentionConfigs(ctx, dr)
if err != nil {
return clues.Stack(err)
}
// Update blob config information.
blobChanged, err := w.setBlobConfigParams(retention.Mode, retention.Duration, blobCfg)
if err != nil {
return clues.Wrap(err, "setting retention mode or duration").WithClues(ctx)
}
// Update maintenance config information.
var maintenanceChanged bool
if retention.Extend != nil && params.ExtendObjectLocks != *retention.Extend {
params.ExtendObjectLocks = *retention.Extend
maintenanceChanged = true
}
// Check the new config is valid.
if blobCfg.IsRetentionEnabled() {
if err := maintenance.CheckExtendRetention(ctx, *blobCfg, params); err != nil {
return clues.Wrap(err, "invalid retention config").WithClues(ctx)
}
}
return clues.Stack(persistRetentionConfigs(
ctx,
dr,
blobCfg,
blobChanged,
params,
maintenanceChanged,
)).OrNil()
}
func getRetentionConfigs(
ctx context.Context,
dr repo.DirectRepository,
) (*format.BlobStorageConfiguration, *maintenance.Params, error) {
blobCfg, err := dr.FormatManager().BlobCfgBlob()
if err != nil {
return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx)
}
params, err := maintenance.GetParams(ctx, dr)
if err != nil {
return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx)
}
return &blobCfg, params, nil
}
func persistRetentionConfigs(
ctx context.Context,
dr repo.DirectRepository,
blobCfg *format.BlobStorageConfiguration,
blobChanged bool,
params *maintenance.Params,
maintenanceChanged bool,
) error {
// Persist changes.
if !blobChanged && !maintenanceChanged {
return nil
}
mp, err := dr.FormatManager().GetMutableParameters()
if err != nil {
return clues.Wrap(err, "getting mutable parameters")
}
requiredFeatures, err := dr.FormatManager().RequiredFeatures()
if err != nil {
return clues.Wrap(err, "getting required features").WithClues(ctx)
}
// Must be the case that only blob changed.
if !maintenanceChanged {
return clues.Wrap(
dr.FormatManager().SetParameters(ctx, mp, *blobCfg, requiredFeatures),
"persisting storage config",
).WithClues(ctx).OrNil()
}
// Both blob and maintenance changed. A DirectWriteSession is required to
// update the maintenance config but not the blob config.
err = repo.DirectWriteSession(
ctx,
dr,
repo.WriteSessionOptions{
Purpose: "Corso immutable backups config",
},
func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
// Set the maintenance config first as we can bail out of the write
// session later.
if err := maintenance.SetParams(ctx, dw, params); err != nil {
return clues.Wrap(err, "maintenance config").
WithClues(ctx)
}
if !blobChanged {
return nil
}
return clues.Wrap(
dr.FormatManager().SetParameters(ctx, mp, *blobCfg, requiredFeatures),
"storage config",
).WithClues(ctx).OrNil()
})
return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil()
}
func (w Wrapper) setBlobConfigParams(
mode *repository.RetentionMode,
duration *time.Duration,
blobCfg *format.BlobStorageConfiguration,
) (bool, error) {
changed, err := setBlobConfigMode(mode, blobCfg)
if err != nil {
return false, clues.Stack(err)
}
tmp := setBlobConfigDuration(duration, blobCfg)
changed = changed || tmp
return changed, nil
}
func setBlobConfigDuration(
duration *time.Duration,
blobCfg *format.BlobStorageConfiguration,
) bool {
var changed bool
if duration != nil && blobCfg.RetentionPeriod != *duration {
blobCfg.RetentionPeriod = *duration
changed = true
}
return changed
}
func setBlobConfigMode(
mode *repository.RetentionMode,
blobCfg *format.BlobStorageConfiguration,
) (bool, error) {
if mode == nil {
return false, nil
}
startMode := blobCfg.RetentionMode
switch *mode {
case repository.NoRetention:
if !blobCfg.IsRetentionEnabled() {
return false, nil
}
blobCfg.RetentionMode = ""
blobCfg.RetentionPeriod = 0
case repository.GovernanceRetention:
blobCfg.RetentionMode = blob.Governance
case repository.ComplianceRetention:
blobCfg.RetentionMode = blob.Compliance
default:
return false, clues.New("unknown retention mode").
With("provided_retention_mode", mode.String())
}
// Only check if the retention mode is not empty. IsValid errors out if it's
// empty.
if len(blobCfg.RetentionMode) > 0 && !blobCfg.RetentionMode.IsValid() {
return false, clues.New("invalid retention mode").
With("retention_mode", blobCfg.RetentionMode)
}
return startMode != blobCfg.RetentionMode, nil
} }

View File

@ -696,6 +696,24 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
42), 42),
} }
c1 := exchMock.NewCollection(
suite.storePath1,
suite.locPath1,
0)
c1.ColState = data.NotMovedState
c1.PrevPath = suite.storePath1
c2 := exchMock.NewCollection(
suite.storePath2,
suite.locPath2,
0)
c2.ColState = data.NotMovedState
c2.PrevPath = suite.storePath2
// Make empty collections at the same locations so a backup still runs
// even though nothing has changed.
emptyCollections := []data.BackupCollection{c1, c2}
// tags that are supplied by the caller. This includes basic tags to support // tags that are supplied by the caller. This includes basic tags to support
// lookups and extra tags the caller may want to apply. // lookups and extra tags the caller may want to apply.
tags := map[string]string{ tags := map[string]string{
@ -703,108 +721,246 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
"brunhilda": "", "brunhilda": "",
} }
reasons := []Reason{ reasons := []Reasoner{
{ NewReason(
ResourceOwner: suite.storePath1.ResourceOwner(), testTenant,
Service: suite.storePath1.Service(), suite.storePath1.ResourceOwner(),
Category: suite.storePath1.Category(), suite.storePath1.Service(),
}, suite.storePath1.Category(),
{ ),
ResourceOwner: suite.storePath2.ResourceOwner(), NewReason(
Service: suite.storePath2.Service(), testTenant,
Category: suite.storePath2.Category(), suite.storePath2.ResourceOwner(),
}, suite.storePath2.Service(),
} suite.storePath2.Category(),
),
for _, r := range reasons {
for _, k := range r.TagKeys() {
tags[k] = ""
}
} }
expectedTags := map[string]string{} expectedTags := map[string]string{}
maps.Copy(expectedTags, normalizeTagKVs(tags)) maps.Copy(expectedTags, tags)
table := []struct { for _, r := range reasons {
name string for _, k := range tagKeys(r) {
expectedUploadedFiles int expectedTags[k] = ""
expectedCachedFiles int }
// Whether entries in the resulting details should be marked as updated.
deetsUpdated bool
}{
{
name: "Uncached",
expectedUploadedFiles: 47,
expectedCachedFiles: 0,
deetsUpdated: true,
},
{
name: "Cached",
expectedUploadedFiles: 0,
expectedCachedFiles: 47,
deetsUpdated: false,
},
} }
prevSnaps := []IncrementalBase{} expectedTags = normalizeTagKVs(expectedTags)
type testCase struct {
name string
baseBackups func(base ManifestEntry) BackupBases
collections []data.BackupCollection
expectedUploadedFiles int
expectedCachedFiles int
// We're either going to get details entries or entries in the details
// merger. Details is populated when there are entries in the collection. The
// details merger is populated for cached entries. The details merger
// doesn't count folders, only items.
//
// Setting this to true looks for details merger entries. Setting it to
// false looks for details entries.
expectMerge bool
// Whether entries in the resulting details should be marked as updated.
deetsUpdated assert.BoolAssertionFunc
hashedBytesCheck assert.ValueAssertionFunc
// Range of bytes (inclusive) to expect as uploaded. A little fragile, but
// it lets us distinguish content that wasn't uploaded (cached, deduped, or
// skipped via existing dir entries) from data that was actually pushed to S3.
uploadedBytes []int64
}
// Initial backup. All files should be considered new by kopia.
baseBackupCase := testCase{
name: "Uncached",
baseBackups: func(ManifestEntry) BackupBases {
return NewMockBackupBases()
},
collections: collections,
expectedUploadedFiles: 47,
expectedCachedFiles: 0,
deetsUpdated: assert.True,
hashedBytesCheck: assert.NotZero,
uploadedBytes: []int64{8000, 10000},
}
runAndTestBackup := func(test testCase, base ManifestEntry) ManifestEntry {
var res ManifestEntry
for _, test := range table {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
stats, deets, _, err := suite.w.ConsumeBackupCollections( ctx, flush := tester.NewContext(t)
suite.ctx, defer flush()
prevSnaps,
collections, bbs := test.baseBackups(base)
stats, deets, deetsMerger, err := suite.w.ConsumeBackupCollections(
ctx,
reasons,
bbs,
test.collections,
nil, nil,
tags, tags,
true, true,
fault.New(true)) fault.New(true))
assert.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files") assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files")
assert.Equal(t, test.expectedUploadedFiles, stats.UncachedFileCount, "uncached files") assert.Equal(t, test.expectedUploadedFiles, stats.UncachedFileCount, "uncached files")
assert.Equal(t, test.expectedCachedFiles, stats.CachedFileCount, "cached files") assert.Equal(t, test.expectedCachedFiles, stats.CachedFileCount, "cached files")
assert.Equal(t, 6, stats.TotalDirectoryCount) assert.Equal(t, 4+len(test.collections), stats.TotalDirectoryCount, "directory count")
assert.Equal(t, 0, stats.IgnoredErrorCount) assert.Equal(t, 0, stats.IgnoredErrorCount)
assert.Equal(t, 0, stats.ErrorCount) assert.Equal(t, 0, stats.ErrorCount)
assert.False(t, stats.Incomplete) assert.False(t, stats.Incomplete)
test.hashedBytesCheck(t, stats.TotalHashedBytes, "hashed bytes")
// 47 file and 2 folder entries. assert.LessOrEqual(
details := deets.Details().Entries
assert.Len(
t, t,
details, test.uploadedBytes[0],
test.expectedUploadedFiles+test.expectedCachedFiles+2, stats.TotalUploadedBytes,
) "low end of uploaded bytes")
assert.GreaterOrEqual(
t,
test.uploadedBytes[1],
stats.TotalUploadedBytes,
"high end of uploaded bytes")
for _, entry := range details { if test.expectMerge {
assert.Equal(t, test.deetsUpdated, entry.Updated) assert.Empty(t, deets.Details().Entries, "details entries")
assert.Equal(
t,
test.expectedUploadedFiles+test.expectedCachedFiles,
deetsMerger.ItemsToMerge(),
"details merger entries")
} else {
assert.Zero(t, deetsMerger.ItemsToMerge(), "details merger entries")
details := deets.Details().Entries
assert.Len(
t,
details,
// 47 file and 2 folder entries.
test.expectedUploadedFiles+test.expectedCachedFiles+2,
)
for _, entry := range details {
test.deetsUpdated(t, entry.Updated)
}
} }
checkSnapshotTags( checkSnapshotTags(
t, t,
suite.ctx, ctx,
suite.w.c, suite.w.c,
expectedTags, expectedTags,
stats.SnapshotID, stats.SnapshotID,
) )
snap, err := snapshot.LoadSnapshot( snap, err := snapshot.LoadSnapshot(
suite.ctx, ctx,
suite.w.c, suite.w.c,
manifest.ID(stats.SnapshotID), manifest.ID(stats.SnapshotID),
) )
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
prevSnaps = append(prevSnaps, IncrementalBase{ res = ManifestEntry{
Manifest: snap, Manifest: snap,
SubtreePaths: []*path.Builder{ Reasons: reasons,
suite.storePath1.ToBuilder().Dir(), }
},
})
}) })
return res
}
base := runAndTestBackup(baseBackupCase, ManifestEntry{})
table := []testCase{
{
name: "Kopia Assist And Merge All Files Changed",
baseBackups: func(base ManifestEntry) BackupBases {
return NewMockBackupBases().WithMergeBases(base)
},
collections: collections,
expectedUploadedFiles: 0,
expectedCachedFiles: 47,
deetsUpdated: assert.False,
hashedBytesCheck: assert.Zero,
uploadedBytes: []int64{4000, 6000},
},
{
name: "Kopia Assist And Merge No Files Changed",
baseBackups: func(base ManifestEntry) BackupBases {
return NewMockBackupBases().WithMergeBases(base)
},
// Pass in empty collections to force a backup; otherwise the operation is
// skipped because nothing changed. The real goal is to make kopia process
// the merged collections again.
collections: emptyCollections,
// Should hit cached check prior to dir entry check so we see them as
// cached.
expectedUploadedFiles: 0,
expectedCachedFiles: 47,
// Entries go into the details merger because we never materialize details
// info for the items since they're from the base.
expectMerge: true,
// Not used since there are no details entries.
deetsUpdated: assert.False,
hashedBytesCheck: assert.Zero,
uploadedBytes: []int64{4000, 6000},
},
{
name: "Kopia Assist Only",
baseBackups: func(base ManifestEntry) BackupBases {
return NewMockBackupBases().WithAssistBases(base)
},
collections: collections,
expectedUploadedFiles: 0,
expectedCachedFiles: 47,
deetsUpdated: assert.False,
hashedBytesCheck: assert.Zero,
uploadedBytes: []int64{4000, 6000},
},
{
name: "Merge Only",
baseBackups: func(base ManifestEntry) BackupBases {
return NewMockBackupBases().WithMergeBases(base).ClearMockAssistBases()
},
// Pass in empty collections to force a backup; otherwise the operation is
// skipped because nothing changed. The real goal is to make kopia process
// the merged collections again.
collections: emptyCollections,
expectedUploadedFiles: 47,
expectedCachedFiles: 0,
expectMerge: true,
// Not used since there are no details entries.
deetsUpdated: assert.False,
// Kopia still counts these bytes as "hashed" even though it shouldn't
// read the file data since they already have dir entries it can reuse.
hashedBytesCheck: assert.NotZero,
uploadedBytes: []int64{4000, 6000},
},
{
name: "Content Hash Only",
baseBackups: func(base ManifestEntry) BackupBases {
return NewMockBackupBases()
},
collections: collections,
expectedUploadedFiles: 47,
expectedCachedFiles: 0,
// Marked as updated because we still fall into the uploadFile handler in
// kopia instead of the cachedFile handler.
deetsUpdated: assert.True,
hashedBytesCheck: assert.NotZero,
uploadedBytes: []int64{4000, 6000},
},
}
for _, test := range table {
runAndTestBackup(test, base)
} }
} }
@ -837,23 +993,25 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
"brunhilda": "", "brunhilda": "",
} }
reasons := []Reason{ reasons := []Reasoner{
{ NewReason(
ResourceOwner: storePath.ResourceOwner(), testTenant,
Service: storePath.Service(), storePath.ResourceOwner(),
Category: storePath.Category(), storePath.Service(),
}, storePath.Category()),
}
for _, r := range reasons {
for _, k := range r.TagKeys() {
tags[k] = ""
}
} }
expectedTags := map[string]string{} expectedTags := map[string]string{}
maps.Copy(expectedTags, normalizeTagKVs(tags)) maps.Copy(expectedTags, tags)
for _, r := range reasons {
for _, k := range tagKeys(r) {
expectedTags[k] = ""
}
}
expectedTags = normalizeTagKVs(expectedTags)
table := []struct { table := []struct {
name string name string
@ -931,7 +1089,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
}, },
} }
prevSnaps := []IncrementalBase{} prevSnaps := NewMockBackupBases()
for _, test := range table { for _, test := range table {
suite.Run(test.name, func() { suite.Run(test.name, func() {
@ -940,6 +1098,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
stats, deets, prevShortRefs, err := suite.w.ConsumeBackupCollections( stats, deets, prevShortRefs, err := suite.w.ConsumeBackupCollections(
suite.ctx, suite.ctx,
reasons,
prevSnaps, prevSnaps,
collections, collections,
nil, nil,
@ -992,12 +1151,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
manifest.ID(stats.SnapshotID)) manifest.ID(stats.SnapshotID))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
prevSnaps = append(prevSnaps, IncrementalBase{ prevSnaps.WithMergeBases(
Manifest: snap, ManifestEntry{
SubtreePaths: []*path.Builder{ Manifest: snap,
storePath.ToBuilder().Dir(), Reasons: reasons,
}, },
}) )
}) })
} }
} }
@ -1016,16 +1175,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
w := &Wrapper{k} w := &Wrapper{k}
tags := map[string]string{} r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
reason := Reason{
ResourceOwner: testUser,
Service: path.ExchangeService,
Category: path.EmailCategory,
}
for _, k := range reason.TagKeys() {
tags[k] = ""
}
dc1 := exchMock.NewCollection(suite.storePath1, suite.locPath1, 1) dc1 := exchMock.NewCollection(suite.storePath1, suite.locPath1, 1)
dc2 := exchMock.NewCollection(suite.storePath2, suite.locPath2, 1) dc2 := exchMock.NewCollection(suite.storePath2, suite.locPath2, 1)
@ -1038,10 +1188,11 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
stats, _, _, err := w.ConsumeBackupCollections( stats, _, _, err := w.ConsumeBackupCollections(
ctx, ctx,
[]Reasoner{r},
nil, nil,
[]data.BackupCollection{dc1, dc2}, []data.BackupCollection{dc1, dc2},
nil, nil,
tags, nil,
true, true,
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -1112,16 +1263,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
loc1 := path.Builder{}.Append(suite.storePath1.Folders()...) loc1 := path.Builder{}.Append(suite.storePath1.Folders()...)
loc2 := path.Builder{}.Append(suite.storePath2.Folders()...) loc2 := path.Builder{}.Append(suite.storePath2.Folders()...)
tags := map[string]string{} r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
reason := Reason{
ResourceOwner: testUser,
Service: path.ExchangeService,
Category: path.EmailCategory,
}
for _, k := range reason.TagKeys() {
tags[k] = ""
}
collections := []data.BackupCollection{ collections := []data.BackupCollection{
&mockBackupCollection{ &mockBackupCollection{
@ -1164,10 +1306,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
stats, deets, _, err := suite.w.ConsumeBackupCollections( stats, deets, _, err := suite.w.ConsumeBackupCollections(
suite.ctx, suite.ctx,
[]Reasoner{r},
nil, nil,
collections, collections,
nil, nil,
tags, nil,
true, true,
fault.New(true)) fault.New(true))
require.Error(t, err, clues.ToCore(err)) require.Error(t, err, clues.ToCore(err))
@ -1239,6 +1382,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
s, d, _, err := suite.w.ConsumeBackupCollections( s, d, _, err := suite.w.ConsumeBackupCollections(
ctx, ctx,
nil, nil,
nil,
test.collections, test.collections,
nil, nil,
nil, nil,
@ -1391,23 +1535,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
collections = append(collections, collection) collections = append(collections, collection)
} }
tags := map[string]string{} r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
reason := Reason{
ResourceOwner: testUser,
Service: path.ExchangeService,
Category: path.EmailCategory,
}
for _, k := range reason.TagKeys() {
tags[k] = ""
}
stats, deets, _, err := suite.w.ConsumeBackupCollections( stats, deets, _, err := suite.w.ConsumeBackupCollections(
suite.ctx, suite.ctx,
[]Reasoner{r},
nil, nil,
collections, collections,
nil, nil,
tags, nil,
false, false,
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -1437,32 +1573,11 @@ func (c *i64counter) Count(i int64) {
} }
func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
reason := Reason{ r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
ResourceOwner: testUser,
Service: path.ExchangeService,
Category: path.EmailCategory,
}
subtreePathTmp, err := path.Build(
testTenant,
testUser,
path.ExchangeService,
path.EmailCategory,
false,
"tmp")
require.NoError(suite.T(), err, clues.ToCore(err))
subtreePath := subtreePathTmp.ToBuilder().Dir()
man, err := suite.w.c.LoadSnapshot(suite.ctx, suite.snapshotID) man, err := suite.w.c.LoadSnapshot(suite.ctx, suite.snapshotID)
require.NoError(suite.T(), err, "getting base snapshot: %v", clues.ToCore(err)) require.NoError(suite.T(), err, "getting base snapshot: %v", clues.ToCore(err))
tags := map[string]string{}
for _, k := range reason.TagKeys() {
tags[k] = ""
}
table := []struct { table := []struct {
name string name string
excludeItem bool excludeItem bool
@ -1551,17 +1666,16 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
stats, _, _, err := suite.w.ConsumeBackupCollections( stats, _, _, err := suite.w.ConsumeBackupCollections(
suite.ctx, suite.ctx,
[]IncrementalBase{ []Reasoner{r},
{ NewMockBackupBases().WithMergeBases(
ManifestEntry{
Manifest: man, Manifest: man,
SubtreePaths: []*path.Builder{ Reasons: []Reasoner{r},
subtreePath,
},
}, },
}, ),
test.cols(), test.cols(),
excluded, excluded,
tags, nil,
true, true,
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))

View File

@ -2,7 +2,6 @@ package m365
import ( import (
"context" "context"
"strings"
"github.com/alcionai/clues" "github.com/alcionai/clues"
@ -44,7 +43,7 @@ func (ctrl *Controller) ProduceBackupCollections(
ctx, end := diagnostics.Span( ctx, end := diagnostics.Span(
ctx, ctx,
"m365:produceBackupCollections", "m365:produceBackupCollections",
diagnostics.Index("service", sels.Service.String())) diagnostics.Index("service", sels.PathService().String()))
defer end() defer end()
ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()}) ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
@ -61,8 +60,8 @@ func (ctrl *Controller) ProduceBackupCollections(
serviceEnabled, canMakeDeltaQueries, err := checkServiceEnabled( serviceEnabled, canMakeDeltaQueries, err := checkServiceEnabled(
ctx, ctx,
ctrl.AC.Users(), ctrl.AC.Users(),
path.ServiceType(sels.Service), sels.PathService(),
sels.DiscreteOwner) owner.ID())
if err != nil { if err != nil {
return nil, nil, false, err return nil, nil, false, err
} }
@ -194,10 +193,8 @@ func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error {
ids = siteIDs ids = siteIDs
} }
resourceOwner := strings.ToLower(sels.DiscreteOwner) if !filters.Contains(ids).Compare(sels.ID()) {
return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_protected_resource", sels.DiscreteOwner)
if !filters.Equal(ids).Compare(resourceOwner) {
return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_resource_owner", sels.DiscreteOwner)
} }
return nil return nil
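
A hedged illustration of the new membership check. The Contains filter and Compare call appear in this diff; the case-insensitivity is an assumption, though it would explain why the manual strings.ToLower was dropped.

// ids holds the known site IDs; the selector's ID must be among them.
ids := []string{"site-a", "site-b"}

// Assumes corso's filters package normalizes case internally.
match := filters.Contains(ids).Compare("Site-B") // true under that assumption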

View File

@ -57,7 +57,7 @@ func (suite *DataCollectionIntgSuite) SetupSuite() {
suite.tenantID = creds.AzureTenantID suite.tenantID = creds.AzureTenantID
suite.ac, err = api.NewClient(creds) suite.ac, err = api.NewClient(creds, control.DefaultOptions())
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
} }
@ -120,7 +120,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
sel := test.getSelector(t) sel := test.getSelector(t)
uidn := inMock.NewProvider(sel.ID(), sel.Name()) uidn := inMock.NewProvider(sel.ID(), sel.Name())
ctrlOpts := control.Defaults() ctrlOpts := control.DefaultOptions()
ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries
collections, excludes, canUsePreviousBackup, err := exchange.ProduceBackupCollections( collections, excludes, canUsePreviousBackup, err := exchange.ProduceBackupCollections(
@ -239,7 +239,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
test.getSelector(t), test.getSelector(t),
nil, nil,
version.NoBackup, version.NoBackup,
control.Defaults(), control.DefaultOptions(),
fault.New(true)) fault.New(true))
assert.Error(t, err, clues.ToCore(err)) assert.Error(t, err, clues.ToCore(err))
assert.False(t, canUsePreviousBackup, "can use previous backup") assert.False(t, canUsePreviousBackup, "can use previous backup")
@ -296,7 +296,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
nil, nil,
ctrl.credentials, ctrl.credentials,
ctrl, ctrl,
control.Defaults(), control.DefaultOptions(),
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup") assert.True(t, canUsePreviousBackup, "can use previous backup")
@ -367,7 +367,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
siteIDs = []string{siteID} siteIDs = []string{siteID}
) )
id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil) id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewSharePointBackup(siteIDs) sel := selectors.NewSharePointBackup(siteIDs)
@ -381,7 +381,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
sel.Selector, sel.Selector,
nil, nil,
version.NoBackup, version.NoBackup,
control.Defaults(), control.DefaultOptions(),
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup") assert.True(t, canUsePreviousBackup, "can use previous backup")
@ -414,7 +414,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
siteIDs = []string{siteID} siteIDs = []string{siteID}
) )
id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil) id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewSharePointBackup(siteIDs) sel := selectors.NewSharePointBackup(siteIDs)
@ -428,7 +428,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
sel.Selector, sel.Selector,
nil, nil,
version.NoBackup, version.NoBackup,
control.Defaults(), control.DefaultOptions(),
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup") assert.True(t, canUsePreviousBackup, "can use previous backup")

View File

@ -14,6 +14,7 @@ import (
"github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/internal/operations/inject"
"github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api"
@ -23,6 +24,7 @@ import (
var ( var (
_ inject.BackupProducer = &Controller{} _ inject.BackupProducer = &Controller{}
_ inject.RestoreConsumer = &Controller{} _ inject.RestoreConsumer = &Controller{}
_ inject.ExportConsumer = &Controller{}
) )
// Controller is a struct used to wrap the GraphServiceClient and // Controller is a struct used to wrap the GraphServiceClient and
@ -47,6 +49,11 @@ type Controller struct {
// mutex used to synchronize updates to `status` // mutex used to synchronize updates to `status`
mu sync.Mutex mu sync.Mutex
status support.ControllerOperationStatus // contains the status of the last run status status support.ControllerOperationStatus // contains the status of the last run status
// backupDriveIDNames is populated on restore. It maps the backup's
// drive IDs to their names (and back). Primarily for use when creating
// or looking up a drive.
backupDriveIDNames idname.CacheBuilder
} }
func NewController( func NewController(
@ -63,7 +70,7 @@ func NewController(
return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx) return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx)
} }
ac, err := api.NewClient(creds) ac, err := api.NewClient(creds, co)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "creating api client").WithClues(ctx) return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
} }
@ -77,10 +84,11 @@ func NewController(
AC: ac, AC: ac,
IDNameLookup: idname.NewCache(nil), IDNameLookup: idname.NewCache(nil),
credentials: creds, credentials: creds,
ownerLookup: rCli, ownerLookup: rCli,
tenant: acct.ID(), tenant: acct.ID(),
wg: &sync.WaitGroup{}, wg: &sync.WaitGroup{},
backupDriveIDNames: idname.NewCache(nil),
} }
return &ctrl, nil return &ctrl, nil
@ -142,6 +150,16 @@ func (ctrl *Controller) incrementAwaitingMessages() {
ctrl.wg.Add(1) ctrl.wg.Add(1)
} }
func (ctrl *Controller) CacheItemInfo(dii details.ItemInfo) {
if dii.SharePoint != nil {
ctrl.backupDriveIDNames.Add(dii.SharePoint.DriveID, dii.SharePoint.DriveName)
}
if dii.OneDrive != nil {
ctrl.backupDriveIDNames.Add(dii.OneDrive.DriveID, dii.OneDrive.DriveName)
}
}
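
A minimal usage sketch (IDs and names hypothetical), matching the NameOf/IDOf lookups exercised by the controller unit test later in this diff:

// During restore, item info flows through CacheItemInfo; afterwards the
// drive name can be recovered from its ID, and vice versa.
ctrl.CacheItemInfo(details.ItemInfo{
	OneDrive: &details.OneDriveInfo{DriveID: "drive-id", DriveName: "drive-name"},
})

name, _ := ctrl.backupDriveIDNames.NameOf("drive-id") // "drive-name"
id, _ := ctrl.backupDriveIDNames.IDOf("drive-name")   // "drive-id"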
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Resource Lookup Handling // Resource Lookup Handling
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -228,15 +246,15 @@ func (r resourceClient) getOwnerIDAndNameFrom(
return id, name, nil return id, name, nil
} }
// PopulateOwnerIDAndNamesFrom takes the provided owner identifier and produces // PopulateProtectedResourceIDAndName takes the provided owner identifier and produces
// the owner's name and ID from that value. Returns an error if the owner is // the owner's name and ID from that value. Returns an error if the owner is
// not recognized by the current tenant. // not recognized by the current tenant.
// //
// The id-name swapper is optional. Some processes will look up all owners in // The id-name cacher is optional. Some processes will look up all owners in
// the tenant before reaching this step. In that case, the data gets handed // the tenant before reaching this step. In that case, the data gets handed
// down for this func to consume instead of performing further queries. The // down for this func to consume instead of performing further queries. The
// data gets stored inside the controller instance for later re-use. // data gets stored inside the controller instance for later re-use.
func (ctrl *Controller) PopulateOwnerIDAndNamesFrom( func (ctrl *Controller) PopulateProtectedResourceIDAndName(
ctx context.Context, ctx context.Context,
owner string, // input value, can be either id or name owner string, // input value, can be either id or name
ins idname.Cacher, ins idname.Cacher,

View File

@ -12,16 +12,22 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/idname"
inMock "github.com/alcionai/corso/src/internal/common/idname/mock" inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock" exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/mock" "github.com/alcionai/corso/src/internal/m365/mock"
"github.com/alcionai/corso/src/internal/m365/resource" "github.com/alcionai/corso/src/internal/m365/resource"
"github.com/alcionai/corso/src/internal/m365/stub" "github.com/alcionai/corso/src/internal/m365/stub"
"github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/operations/inject"
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/count" "github.com/alcionai/corso/src/pkg/count"
@ -220,7 +226,7 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {
ctrl := &Controller{ownerLookup: test.rc} ctrl := &Controller{ownerLookup: test.rc}
rID, rName, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, test.owner, test.ins) rID, rName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, test.owner, test.ins)
test.expectErr(t, err, clues.ToCore(err)) test.expectErr(t, err, clues.ToCore(err))
assert.Equal(t, test.expectID, rID, "id") assert.Equal(t, test.expectID, rID, "id")
assert.Equal(t, test.expectName, rName, "name") assert.Equal(t, test.expectName, rName, "name")
@ -260,6 +266,82 @@ func (suite *ControllerUnitSuite) TestController_Wait() {
assert.Equal(t, int64(4), result.Bytes) assert.Equal(t, int64(4), result.Bytes)
} }
func (suite *ControllerUnitSuite) TestController_CacheItemInfo() {
var (
odid = "od-id"
odname = "od-name"
spid = "sp-id"
spname = "sp-name"
// intentionally declared outside the test loop so the cache accumulates
// entries across subtests
ctrl = &Controller{
wg: &sync.WaitGroup{},
region: &trace.Region{},
backupDriveIDNames: idname.NewCache(nil),
}
)
table := []struct {
name string
service path.ServiceType
cat path.CategoryType
dii details.ItemInfo
expectID string
expectName string
}{
{
name: "exchange",
dii: details.ItemInfo{
Exchange: &details.ExchangeInfo{},
},
expectID: "",
expectName: "",
},
{
name: "folder",
dii: details.ItemInfo{
Folder: &details.FolderInfo{},
},
expectID: "",
expectName: "",
},
{
name: "onedrive",
dii: details.ItemInfo{
OneDrive: &details.OneDriveInfo{
DriveID: odid,
DriveName: odname,
},
},
expectID: odid,
expectName: odname,
},
{
name: "sharepoint",
dii: details.ItemInfo{
SharePoint: &details.SharePointInfo{
DriveID: spid,
DriveName: spname,
},
},
expectID: spid,
expectName: spname,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctrl.CacheItemInfo(test.dii)
name, _ := ctrl.backupDriveIDNames.NameOf(test.expectID)
assert.Equal(t, test.expectName, name)
id, _ := ctrl.backupDriveIDNames.IDOf(test.expectName)
assert.Equal(t, test.expectID, id)
})
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Integration tests // Integration tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -306,20 +388,24 @@ func (suite *ControllerIntegrationSuite) TestRestoreFailsBadService() {
} }
) )
restoreCfg.IncludePermissions = true
rcc := inject.RestoreConsumerConfig{
BackupVersion: version.Backup,
Options: control.DefaultOptions(),
ProtectedResource: sel,
RestoreConfig: restoreCfg,
Selector: sel,
}
deets, err := suite.ctrl.ConsumeRestoreCollections( deets, err := suite.ctrl.ConsumeRestoreCollections(
ctx, ctx,
version.Backup, rcc,
sel, []data.RestoreCollection{&dataMock.Collection{}},
restoreCfg,
control.Options{
RestorePermissions: true,
ToggleFeatures: control.Toggles{},
},
nil,
fault.New(true), fault.New(true),
count.New()) count.New())
assert.Error(t, err, clues.ToCore(err)) assert.Error(t, err, graph.ErrServiceNotEnabled, clues.ToCore(err))
assert.NotNil(t, deets) assert.Nil(t, deets)
status := suite.ctrl.Wait() status := suite.ctrl.Wait()
assert.Equal(t, 0, status.Objects) assert.Equal(t, 0, status.Objects)
@ -329,6 +415,8 @@ func (suite *ControllerIntegrationSuite) TestRestoreFailsBadService() {
func (suite *ControllerIntegrationSuite) TestEmptyCollections() { func (suite *ControllerIntegrationSuite) TestEmptyCollections() {
restoreCfg := testdata.DefaultRestoreConfig("") restoreCfg := testdata.DefaultRestoreConfig("")
restoreCfg.IncludePermissions = true
table := []struct { table := []struct {
name string name string
col []data.RestoreCollection col []data.RestoreCollection
@ -385,25 +473,22 @@ func (suite *ControllerIntegrationSuite) TestEmptyCollections() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
rcc := inject.RestoreConsumerConfig{
BackupVersion: version.Backup,
Options: control.DefaultOptions(),
ProtectedResource: test.sel,
RestoreConfig: restoreCfg,
Selector: test.sel,
}
deets, err := suite.ctrl.ConsumeRestoreCollections( deets, err := suite.ctrl.ConsumeRestoreCollections(
ctx, ctx,
version.Backup, rcc,
test.sel,
restoreCfg,
control.Options{
RestorePermissions: true,
ToggleFeatures: control.Toggles{},
},
test.col, test.col,
fault.New(true), fault.New(true),
count.New()) count.New())
require.NoError(t, err, clues.ToCore(err)) require.Error(t, err, clues.ToCore(err))
assert.NotNil(t, deets) assert.Nil(t, deets)
stats := suite.ctrl.Wait()
assert.Zero(t, stats.Objects)
assert.Zero(t, stats.Folders)
assert.Zero(t, stats.Successes)
}) })
} }
} }
@ -429,12 +514,18 @@ func runRestore(
restoreCtrl := newController(ctx, t, sci.Resource, path.ExchangeService) restoreCtrl := newController(ctx, t, sci.Resource, path.ExchangeService)
restoreSel := getSelectorWith(t, sci.Service, sci.ResourceOwners, true) restoreSel := getSelectorWith(t, sci.Service, sci.ResourceOwners, true)
rcc := inject.RestoreConsumerConfig{
BackupVersion: backupVersion,
Options: control.DefaultOptions(),
ProtectedResource: restoreSel,
RestoreConfig: sci.RestoreCfg,
Selector: restoreSel,
}
deets, err := restoreCtrl.ConsumeRestoreCollections( deets, err := restoreCtrl.ConsumeRestoreCollections(
ctx, ctx,
backupVersion, rcc,
restoreSel,
sci.RestoreCfg,
sci.Opts,
collections, collections,
fault.New(true), fault.New(true),
count.New()) count.New())
@ -536,6 +627,7 @@ func runRestoreBackupTest(
tenant string, tenant string,
resourceOwners []string, resourceOwners []string,
opts control.Options, opts control.Options,
restoreCfg control.RestoreConfig,
) { ) {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
@ -546,7 +638,7 @@ func runRestoreBackupTest(
Service: test.service, Service: test.service,
Tenant: tenant, Tenant: tenant,
ResourceOwners: resourceOwners, ResourceOwners: resourceOwners,
RestoreCfg: testdata.DefaultRestoreConfig(""), RestoreCfg: restoreCfg,
} }
totalItems, totalKopiaItems, collections, expectedData, err := stub.GetCollectionsAndExpected( totalItems, totalKopiaItems, collections, expectedData, err := stub.GetCollectionsAndExpected(
@ -581,6 +673,7 @@ func runRestoreTestWithVersion(
tenant string, tenant string,
resourceOwners []string, resourceOwners []string,
opts control.Options, opts control.Options,
restoreCfg control.RestoreConfig,
) { ) {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
@ -591,7 +684,7 @@ func runRestoreTestWithVersion(
Service: test.service, Service: test.service,
Tenant: tenant, Tenant: tenant,
ResourceOwners: resourceOwners, ResourceOwners: resourceOwners,
RestoreCfg: testdata.DefaultRestoreConfig(""), RestoreCfg: restoreCfg,
} }
totalItems, _, collections, _, err := stub.GetCollectionsAndExpected( totalItems, _, collections, _, err := stub.GetCollectionsAndExpected(
@ -618,6 +711,7 @@ func runRestoreBackupTestVersions(
tenant string, tenant string,
resourceOwners []string, resourceOwners []string,
opts control.Options, opts control.Options,
restoreCfg control.RestoreConfig,
) { ) {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
@ -628,7 +722,7 @@ func runRestoreBackupTestVersions(
Service: test.service, Service: test.service,
Tenant: tenant, Tenant: tenant,
ResourceOwners: resourceOwners, ResourceOwners: resourceOwners,
RestoreCfg: testdata.DefaultRestoreConfig(""), RestoreCfg: restoreCfg,
} }
totalItems, _, collections, _, err := stub.GetCollectionsAndExpected( totalItems, _, collections, _, err := stub.GetCollectionsAndExpected(
@ -662,7 +756,7 @@ func runRestoreBackupTestVersions(
test.collectionsLatest) test.collectionsLatest)
} }
func (suite *ControllerIntegrationSuite) TestRestoreAndBackup() { func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
bodyText := "This email has some text. However, all the text is on the same line." bodyText := "This email has some text. However, all the text is on the same line."
subjectText := "Test message for restore" subjectText := "Test message for restore"
@ -921,10 +1015,8 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup() {
test, test,
suite.ctrl.tenant, suite.ctrl.tenant,
[]string{suite.user}, []string{suite.user},
control.Options{ control.DefaultOptions(),
RestorePermissions: true, control.DefaultRestoreConfig(dttm.HumanReadableDriveItem))
ToggleFeatures: control.Toggles{},
})
}) })
} }
} }
@ -1005,6 +1097,8 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
for i, collection := range test.collections { for i, collection := range test.collections {
// Get a restoreCfg per collection so they're independent. // Get a restoreCfg per collection so they're independent.
restoreCfg := testdata.DefaultRestoreConfig("") restoreCfg := testdata.DefaultRestoreConfig("")
restoreCfg.IncludePermissions = true
expectedDests = append(expectedDests, destAndCats{ expectedDests = append(expectedDests, destAndCats{
resourceOwner: suite.user, resourceOwner: suite.user,
dest: restoreCfg.Location, dest: restoreCfg.Location,
@ -1037,15 +1131,18 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
) )
restoreCtrl := newController(ctx, t, test.resourceCat, path.ExchangeService) restoreCtrl := newController(ctx, t, test.resourceCat, path.ExchangeService)
rcc := inject.RestoreConsumerConfig{
BackupVersion: version.Backup,
Options: control.DefaultOptions(),
ProtectedResource: restoreSel,
RestoreConfig: restoreCfg,
Selector: restoreSel,
}
deets, err := restoreCtrl.ConsumeRestoreCollections( deets, err := restoreCtrl.ConsumeRestoreCollections(
ctx, ctx,
version.Backup, rcc,
restoreSel,
restoreCfg,
control.Options{
RestorePermissions: true,
ToggleFeatures: control.Toggles{},
},
collections, collections,
fault.New(true), fault.New(true),
count.New()) count.New())
@ -1077,10 +1174,7 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
backupSel, backupSel,
nil, nil,
version.NoBackup, version.NoBackup,
control.Options{ control.DefaultOptions(),
RestorePermissions: true,
ToggleFeatures: control.Toggles{},
},
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup") assert.True(t, canUsePreviousBackup, "can use previous backup")
@ -1089,10 +1183,13 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
t.Log("Backup enumeration complete") t.Log("Backup enumeration complete")
restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)
restoreCfg.IncludePermissions = true
ci := stub.ConfigInfo{ ci := stub.ConfigInfo{
Opts: control.Options{RestorePermissions: true}, Opts: control.DefaultOptions(),
// Alright to be empty, needed for OneDrive. // Alright to be empty, needed for OneDrive.
RestoreCfg: control.RestoreConfig{}, RestoreCfg: restoreCfg,
} }
// Pull the data prior to waiting for the status as otherwise it will // Pull the data prior to waiting for the status as otherwise it will
@ -1130,16 +1227,16 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_largeMailAttachmen
}, },
} }
restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)
restoreCfg.IncludePermissions = true
runRestoreBackupTest( runRestoreBackupTest(
suite.T(), suite.T(),
test, test,
suite.ctrl.tenant, suite.ctrl.tenant,
[]string{suite.user}, []string{suite.user},
control.Options{ control.DefaultOptions(),
RestorePermissions: true, restoreCfg)
ToggleFeatures: control.Toggles{},
},
)
} }
func (suite *ControllerIntegrationSuite) TestBackup_CreatesPrefixCollections() { func (suite *ControllerIntegrationSuite) TestBackup_CreatesPrefixCollections() {
@ -1158,8 +1255,7 @@ func (suite *ControllerIntegrationSuite) TestBackup_CreatesPrefixCollections() {
sel.Include( sel.Include(
sel.ContactFolders([]string{selectors.NoneTgt}), sel.ContactFolders([]string{selectors.NoneTgt}),
sel.EventCalendars([]string{selectors.NoneTgt}), sel.EventCalendars([]string{selectors.NoneTgt}),
sel.MailFolders([]string{selectors.NoneTgt}), sel.MailFolders([]string{selectors.NoneTgt}))
)
return sel.Selector return sel.Selector
}, },
@ -1222,23 +1318,20 @@ func (suite *ControllerIntegrationSuite) TestBackup_CreatesPrefixCollections() {
start = time.Now() start = time.Now()
) )
id, name, err := backupCtrl.PopulateOwnerIDAndNamesFrom(ctx, backupSel.DiscreteOwner, nil) id, name, err := backupCtrl.PopulateProtectedResourceIDAndName(ctx, backupSel.DiscreteOwner, nil)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
backupSel.SetDiscreteOwnerIDName(id, name) backupSel.SetDiscreteOwnerIDName(id, name)
dcs, excludes, canUsePreviousBackup, err := backupCtrl.ProduceBackupCollections( dcs, excludes, canUsePreviousBackup, err := backupCtrl.ProduceBackupCollections(
ctx, ctx,
inMock.NewProvider(id, name), idname.NewProvider(id, name),
backupSel, backupSel,
nil, nil,
version.NoBackup, version.NoBackup,
control.Options{ control.DefaultOptions(),
RestorePermissions: false,
ToggleFeatures: control.Toggles{},
},
fault.New(true)) fault.New(true))
require.NoError(t, err) require.NoError(t, err, clues.ToCore(err))
assert.True(t, canUsePreviousBackup, "can use previous backup") assert.True(t, canUsePreviousBackup, "can use previous backup")
// No excludes yet because this isn't an incremental backup. // No excludes yet because this isn't an incremental backup.
assert.True(t, excludes.Empty()) assert.True(t, excludes.Empty())

View File

@ -414,7 +414,7 @@ func (suite *BackupIntgSuite) SetupSuite() {
creds, err := acct.M365Config() creds, err := acct.M365Config()
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
suite.ac, err = api.NewClient(creds) suite.ac, err = api.NewClient(creds, control.DefaultOptions())
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
suite.tenantID = creds.AzureTenantID suite.tenantID = creds.AzureTenantID
@ -466,7 +466,7 @@ func (suite *BackupIntgSuite) TestMailFetch() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
ctrlOpts := control.Defaults() ctrlOpts := control.DefaultOptions()
ctrlOpts.ToggleFeatures.DisableDelta = !test.canMakeDeltaQueries ctrlOpts.ToggleFeatures.DisableDelta = !test.canMakeDeltaQueries
collections, err := createCollections( collections, err := createCollections(
@ -554,7 +554,7 @@ func (suite *BackupIntgSuite) TestDelta() {
inMock.NewProvider(userID, userID), inMock.NewProvider(userID, userID),
test.scope, test.scope,
DeltaPaths{}, DeltaPaths{},
control.Defaults(), control.DefaultOptions(),
func(status *support.ControllerOperationStatus) {}, func(status *support.ControllerOperationStatus) {},
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -587,7 +587,7 @@ func (suite *BackupIntgSuite) TestDelta() {
inMock.NewProvider(userID, userID), inMock.NewProvider(userID, userID),
test.scope, test.scope,
dps, dps,
control.Defaults(), control.DefaultOptions(),
func(status *support.ControllerOperationStatus) {}, func(status *support.ControllerOperationStatus) {},
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -633,7 +633,7 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() {
inMock.NewProvider(suite.user, suite.user), inMock.NewProvider(suite.user, suite.user),
sel.Scopes()[0], sel.Scopes()[0],
DeltaPaths{}, DeltaPaths{},
control.Defaults(), control.DefaultOptions(),
newStatusUpdater(t, &wg), newStatusUpdater(t, &wg),
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -709,7 +709,7 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() {
inMock.NewProvider(suite.user, suite.user), inMock.NewProvider(suite.user, suite.user),
test.scope, test.scope,
DeltaPaths{}, DeltaPaths{},
control.Defaults(), control.DefaultOptions(),
newStatusUpdater(t, &wg), newStatusUpdater(t, &wg),
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -834,7 +834,7 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() {
inMock.NewProvider(suite.user, suite.user), inMock.NewProvider(suite.user, suite.user),
test.scope, test.scope,
DeltaPaths{}, DeltaPaths{},
control.Defaults(), control.DefaultOptions(),
newStatusUpdater(t, &wg), newStatusUpdater(t, &wg),
fault.New(true)) fault.New(true))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -1995,7 +1995,7 @@ func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_i
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
ctrlOpts := control.Defaults() ctrlOpts := control.DefaultOptions()
ctrlOpts.ToggleFeatures.DisableDelta = !deltaAfter ctrlOpts.ToggleFeatures.DisableDelta = !deltaAfter
getter := test.getter getter := test.getter

View File

@ -178,7 +178,7 @@ func (suite *CollectionSuite) TestNewCollection_state() {
test.curr, test.prev, test.loc, test.curr, test.prev, test.loc,
0, 0,
&mockItemer{}, nil, &mockItemer{}, nil,
control.Defaults(), control.DefaultOptions(),
false) false)
assert.Equal(t, test.expect, c.State(), "collection state") assert.Equal(t, test.expect, c.State(), "collection state")
assert.Equal(t, test.curr, c.fullPath, "full path") assert.Equal(t, test.curr, c.fullPath, "full path")

View File

@ -17,6 +17,7 @@ import (
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api"
@ -698,7 +699,7 @@ func (suite *ContainerResolverSuite) SetupSuite() {
} }
func (suite *ContainerResolverSuite) TestPopulate() { func (suite *ContainerResolverSuite) TestPopulate() {
ac, err := api.NewClient(suite.credentials) ac, err := api.NewClient(suite.credentials, control.DefaultOptions())
require.NoError(suite.T(), err, clues.ToCore(err)) require.NoError(suite.T(), err, clues.ToCore(err))
eventFunc := func(t *testing.T) graph.ContainerResolver { eventFunc := func(t *testing.T) graph.ContainerResolver {

View File

@ -9,6 +9,7 @@ import (
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api"
) )
@ -30,7 +31,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
its.creds = creds its.creds = creds
its.ac, err = api.NewClient(creds) its.ac, err = api.NewClient(creds, control.DefaultOptions())
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
its.userID = tconfig.GetM365UserID(ctx) its.userID = tconfig.GetM365UserID(ctx)

View File

@ -12,6 +12,7 @@ import (
"github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api"
) )
@ -83,7 +84,7 @@ func (suite *MailFolderCacheIntegrationSuite) TestDeltaFetch() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
ac, err := api.NewClient(suite.credentials) ac, err := api.NewClient(suite.credentials, control.DefaultOptions())
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
acm := ac.Mail() acm := ac.Mail()

View File

@ -14,6 +14,7 @@ import (
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/internal/operations/inject"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/count" "github.com/alcionai/corso/src/pkg/count"
@ -28,7 +29,7 @@ import (
func ConsumeRestoreCollections( func ConsumeRestoreCollections(
ctx context.Context, ctx context.Context,
ac api.Client, ac api.Client,
restoreCfg control.RestoreConfig, rcc inject.RestoreConsumerConfig,
dcs []data.RestoreCollection, dcs []data.RestoreCollection,
deets *details.Builder, deets *details.Builder,
errs *fault.Bus, errs *fault.Bus,
@ -39,16 +40,13 @@ func ConsumeRestoreCollections(
} }
var ( var (
userID = dcs[0].FullPath().ResourceOwner() resourceID = rcc.ProtectedResource.ID()
directoryCache = make(map[path.CategoryType]graph.ContainerResolver) directoryCache = make(map[path.CategoryType]graph.ContainerResolver)
handlers = restoreHandlers(ac) handlers = restoreHandlers(ac)
metrics support.CollectionMetrics metrics support.CollectionMetrics
el = errs.Local() el = errs.Local()
) )
// FIXME: should be user name
ctx = clues.Add(ctx, "resource_owner", clues.Hide(userID))
for _, dc := range dcs { for _, dc := range dcs {
if el.Failure() != nil { if el.Failure() != nil {
break break
@ -69,7 +67,7 @@ func ConsumeRestoreCollections(
} }
if directoryCache[category] == nil { if directoryCache[category] == nil {
gcr := handler.newContainerCache(userID) gcr := handler.newContainerCache(resourceID)
if err := gcr.Populate(ctx, errs, handler.defaultRootContainer()); err != nil { if err := gcr.Populate(ctx, errs, handler.defaultRootContainer()); err != nil {
return nil, clues.Wrap(err, "populating container cache") return nil, clues.Wrap(err, "populating container cache")
} }
@ -80,8 +78,8 @@ func ConsumeRestoreCollections(
containerID, gcc, err := createDestination( containerID, gcc, err := createDestination(
ictx, ictx,
handler, handler,
handler.formatRestoreDestination(restoreCfg.Location, dc.FullPath()), handler.formatRestoreDestination(rcc.RestoreConfig.Location, dc.FullPath()),
userID, resourceID,
directoryCache[category], directoryCache[category],
errs) errs)
if err != nil { if err != nil {
@ -92,7 +90,7 @@ func ConsumeRestoreCollections(
directoryCache[category] = gcc directoryCache[category] = gcc
ictx = clues.Add(ictx, "restore_destination_id", containerID) ictx = clues.Add(ictx, "restore_destination_id", containerID)
collisionKeyToItemID, err := handler.getItemsInContainerByCollisionKey(ctx, userID, containerID) collisionKeyToItemID, err := handler.getItemsInContainerByCollisionKey(ctx, resourceID, containerID)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "building item collision cache")) el.AddRecoverable(ctx, clues.Wrap(err, "building item collision cache"))
continue continue
@ -102,10 +100,10 @@ func ConsumeRestoreCollections(
ictx, ictx,
handler, handler,
dc, dc,
userID, resourceID,
containerID, containerID,
collisionKeyToItemID, collisionKeyToItemID,
restoreCfg.OnCollision, rcc.RestoreConfig.OnCollision,
deets, deets,
errs, errs,
ctr) ctr)
@ -126,7 +124,7 @@ func ConsumeRestoreCollections(
support.Restore, support.Restore,
len(dcs), len(dcs),
metrics, metrics,
restoreCfg.Location) rcc.RestoreConfig.Location)
return status, el.Failure() return status, el.Failure()
} }
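With backupVersion, selector, restore config, and options now folded into a single struct, callers build an inject.RestoreConsumerConfig up front. A minimal sketch, mirroring the test wiring later in this diff (field values are illustrative):
rcc := inject.RestoreConsumerConfig{
	BackupVersion:     version.Backup,
	Options:           control.DefaultOptions(),
	ProtectedResource: restoreSel, // the selector doubles as the idname provider here
	RestoreConfig:     restoreCfg,
	Selector:          restoreSel,
}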
@ -136,7 +134,7 @@ func restoreCollection(
ctx context.Context, ctx context.Context,
ir itemRestorer, ir itemRestorer,
dc data.RestoreCollection, dc data.RestoreCollection,
userID, destinationID string, resourceID, destinationID string,
collisionKeyToItemID map[string]string, collisionKeyToItemID map[string]string,
collisionPolicy control.CollisionPolicy, collisionPolicy control.CollisionPolicy,
deets *details.Builder, deets *details.Builder,
@ -187,7 +185,7 @@ func restoreCollection(
info, err := ir.restore( info, err := ir.restore(
ictx, ictx,
body, body,
userID, resourceID,
destinationID, destinationID,
collisionKeyToItemID, collisionKeyToItemID,
collisionPolicy, collisionPolicy,
@ -240,7 +238,7 @@ func createDestination(
ctx context.Context, ctx context.Context,
ca containerAPI, ca containerAPI,
destination *path.Builder, destination *path.Builder,
userID string, resourceID string,
gcr graph.ContainerResolver, gcr graph.ContainerResolver,
errs *fault.Bus, errs *fault.Bus,
) (string, graph.ContainerResolver, error) { ) (string, graph.ContainerResolver, error) {
@ -264,7 +262,7 @@ func createDestination(
ca, ca,
cache, cache,
restoreLoc, restoreLoc,
userID, resourceID,
containerParentID, containerParentID,
container, container,
errs) errs)
@ -285,7 +283,7 @@ func getOrPopulateContainer(
ca containerAPI, ca containerAPI,
gcr graph.ContainerResolver, gcr graph.ContainerResolver,
restoreLoc *path.Builder, restoreLoc *path.Builder,
userID, containerParentID, containerName string, resourceID, containerParentID, containerName string,
errs *fault.Bus, errs *fault.Bus,
) (string, error) { ) (string, error) {
cached, ok := gcr.LocationInCache(restoreLoc.String()) cached, ok := gcr.LocationInCache(restoreLoc.String())
@ -293,7 +291,7 @@ func getOrPopulateContainer(
return cached, nil return cached, nil
} }
c, err := ca.CreateContainer(ctx, userID, containerParentID, containerName) c, err := ca.CreateContainer(ctx, resourceID, containerParentID, containerName)
// 409 handling case: // 409 handling case:
// attempt to fetch the container by name and add that result to the cache. // attempt to fetch the container by name and add that result to the cache.
@ -301,7 +299,7 @@ func getOrPopulateContainer(
// sometimes the backend will create the folder despite the 5xx response, // sometimes the backend will create the folder despite the 5xx response,
// leaving our local containerResolver with inconsistent state. // leaving our local containerResolver with inconsistent state.
if graph.IsErrFolderExists(err) { if graph.IsErrFolderExists(err) {
cc, e := ca.GetContainerByName(ctx, userID, containerParentID, containerName) cc, e := ca.GetContainerByName(ctx, resourceID, containerParentID, containerName)
if e != nil { if e != nil {
err = clues.Stack(err, e) err = clues.Stack(err, e)
} else { } else {
@ -327,7 +325,7 @@ func uploadAttachments(
ctx context.Context, ctx context.Context,
ap attachmentPoster, ap attachmentPoster,
as []models.Attachmentable, as []models.Attachmentable,
userID, destinationID, itemID string, resourceID, destinationID, itemID string,
errs *fault.Bus, errs *fault.Bus,
) error { ) error {
el := errs.Local() el := errs.Local()
@ -340,7 +338,7 @@ func uploadAttachments(
err := uploadAttachment( err := uploadAttachment(
ctx, ctx,
ap, ap,
userID, resourceID,
destinationID, destinationID,
itemID, itemID,
a) a)

View File

@ -44,7 +44,7 @@ func (suite *RestoreIntgSuite) SetupSuite() {
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
suite.credentials = m365 suite.credentials = m365
suite.ac, err = api.NewClient(m365) suite.ac, err = api.NewClient(m365, control.DefaultOptions())
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
} }

View File

@ -0,0 +1,62 @@
package m365
import (
"context"
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/onedrive"
"github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/selectors"
)
// ProduceExportCollections exports data from the specified collections
func (ctrl *Controller) ProduceExportCollections(
ctx context.Context,
backupVersion int,
sels selectors.Selector,
exportCfg control.ExportConfig,
opts control.Options,
dcs []data.RestoreCollection,
errs *fault.Bus,
) ([]export.Collection, error) {
ctx, end := diagnostics.Span(ctx, "m365:export")
defer end()
ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
ctx = clues.Add(ctx, "export_config", exportCfg) // TODO(meain): needs PII control
var (
expCollections []export.Collection
status *support.ControllerOperationStatus
deets = &details.Builder{}
err error
)
switch sels.Service {
case selectors.ServiceOneDrive, selectors.ServiceSharePoint:
// OneDrive and SharePoint can share the code to create collections
expCollections, err = onedrive.ProduceExportCollections(
ctx,
backupVersion,
exportCfg,
opts,
dcs,
deets,
errs)
default:
err = clues.Wrap(clues.New(sels.Service.String()), "service not supported")
}
ctrl.incrementAwaitingMessages()
ctrl.UpdateStatus(status)
return expCollections, err
}
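For orientation, a hedged sketch of driving this new controller entry point; ctrl, sels, and dcs are assumed to be wired up by the operations layer, which is not shown in this hunk:
expCols, err := ctrl.ProduceExportCollections(
	ctx,
	version.Backup,           // format version of the backup being read
	sels,                     // selectors.Selector; OneDrive or SharePoint only
	control.ExportConfig{},   // the zero value is accepted, as in the tests below
	control.DefaultOptions(),
	dcs,                      // []data.RestoreCollection streamed out of the backup
	fault.New(true))
if err == nil {
	for _, c := range expCols {
		_ = c.BasePath() // write c.Items(ctx) under this relative directory
	}
}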

View File

@ -271,7 +271,9 @@ func Wrap(ctx context.Context, e error, msg string) *clues.Err {
e = clues.Stack(e, clues.New(mainMsg)) e = clues.Stack(e, clues.New(mainMsg))
} }
return setLabels(clues.Wrap(e, msg).WithClues(ctx).With(data...), innerMsg) ce := clues.Wrap(e, msg).WithClues(ctx).With(data...).WithTrace(1)
return setLabels(ce, innerMsg)
} }
// Stack is a helper function that extracts ODataError metadata from // Stack is a helper function that extracts ODataError metadata from
@ -292,7 +294,9 @@ func Stack(ctx context.Context, e error) *clues.Err {
e = clues.Stack(e, clues.New(mainMsg)) e = clues.Stack(e, clues.New(mainMsg))
} }
return setLabels(clues.Stack(e).WithClues(ctx).With(data...), innerMsg) ce := clues.Stack(e).WithClues(ctx).With(data...).WithTrace(1)
return setLabels(ce, innerMsg)
} }
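The added WithTrace(1) presumably skips the helper's own stack frame so the recorded clues trace points at the caller of graph.Wrap or graph.Stack instead of the helper itself. A minimal sketch of the effect (callGraph is a hypothetical caller-side function):
func doThing(ctx context.Context) error {
	if err := callGraph(ctx); err != nil {
		// with WithTrace(1), the trace records this call site,
		// not a line inside graph.Wrap
		return graph.Wrap(ctx, err, "calling graph")
	}
	return nil
}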
// stackReq is a helper function that extracts ODataError metadata from // stackReq is a helper function that extracts ODataError metadata from
@ -361,7 +365,7 @@ func errData(err odataerrors.ODataErrorable) (string, []any, string) {
msgConcat += ptr.Val(d.GetMessage()) msgConcat += ptr.Val(d.GetMessage())
} }
inner := mainErr.GetInnererror() inner := mainErr.GetInnerError()
if inner != nil { if inner != nil {
data = appendIf(data, "odataerror_inner_cli_req_id", inner.GetClientRequestId()) data = appendIf(data, "odataerror_inner_cli_req_id", inner.GetClientRequestId())
data = appendIf(data, "odataerror_inner_req_id", inner.GetRequestId()) data = appendIf(data, "odataerror_inner_req_id", inner.GetRequestId())

View File

@ -93,7 +93,7 @@ func (suite *HTTPWrapperUnitSuite) TestNewHTTPWrapper_redirectMiddleware() {
hdr.Set("Location", "localhost:99999999/smarfs") hdr.Set("Location", "localhost:99999999/smarfs")
toResp := &http.Response{ toResp := &http.Response{
StatusCode: 302, StatusCode: http.StatusFound,
Header: hdr, Header: hdr,
} }

View File

@ -796,8 +796,8 @@ func compareDriveItem(
assert.Equal(t, expectedMeta.FileName, itemMeta.FileName) assert.Equal(t, expectedMeta.FileName, itemMeta.FileName)
} }
if !mci.Opts.RestorePermissions { if !mci.RestoreCfg.IncludePermissions {
assert.Equal(t, 0, len(itemMeta.Permissions)) assert.Empty(t, itemMeta.Permissions, "no permissions should be included in restore")
return true return true
} }

View File

@ -10,6 +10,7 @@ import (
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/count" "github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors"
@ -26,6 +27,10 @@ type Controller struct {
Err error Err error
Stats data.CollectionStats Stats data.CollectionStats
ProtectedResourceID string
ProtectedResourceName string
ProtectedResourceErr error
} }
func (ctrl Controller) ProduceBackupCollections( func (ctrl Controller) ProduceBackupCollections(
@ -59,13 +64,34 @@ func (ctrl Controller) Wait() *data.CollectionStats {
func (ctrl Controller) ConsumeRestoreCollections( func (ctrl Controller) ConsumeRestoreCollections(
_ context.Context, _ context.Context,
_ int, _ inject.RestoreConsumerConfig,
_ selectors.Selector,
_ control.RestoreConfig,
_ control.Options,
_ []data.RestoreCollection, _ []data.RestoreCollection,
_ *fault.Bus, _ *fault.Bus,
_ *count.Bus, _ *count.Bus,
) (*details.Details, error) { ) (*details.Details, error) {
return ctrl.Deets, ctrl.Err return ctrl.Deets, ctrl.Err
} }
func (ctrl Controller) CacheItemInfo(dii details.ItemInfo) {}
func (ctrl Controller) ProduceExportCollections(
_ context.Context,
_ int,
_ selectors.Selector,
_ control.ExportConfig,
_ control.Options,
_ []data.RestoreCollection,
_ *fault.Bus,
) ([]export.Collection, error) {
return nil, ctrl.Err
}
func (ctrl Controller) PopulateProtectedResourceIDAndName(
ctx context.Context,
protectedResource string, // input value, can be either id or name
ins idname.Cacher,
) (string, string, error) {
return ctrl.ProtectedResourceID,
ctrl.ProtectedResourceName,
ctrl.ProtectedResourceErr
}

View File

@ -945,7 +945,7 @@ func (suite *CollectionUnitTestSuite) TestItemExtensions() {
nil, nil,
} }
opts := control.Defaults() opts := control.DefaultOptions()
opts.ItemExtensionFactory = append( opts.ItemExtensionFactory = append(
opts.ItemExtensionFactory, opts.ItemExtensionFactory,
test.factories...) test.factories...)

View File

@ -0,0 +1,166 @@
package onedrive
import (
"context"
"strings"
"github.com/alcionai/clues"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
"github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)
var _ export.Collection = &exportCollection{}
// exportCollection is the OneDrive implementation of export.Collection.
type exportCollection struct {
// baseDir contains the path of the collection
baseDir string
// backingCollection is the restore collection from which we will
// create the export collection.
backingCollection data.RestoreCollection
// backupVersion is the backupVersion of the backup this collection was part
// of. This is required to figure out how to get the name of the
// item.
backupVersion int
}
func (ec exportCollection) BasePath() string {
return ec.baseDir
}
func (ec exportCollection) Items(ctx context.Context) <-chan export.Item {
ch := make(chan export.Item)
go items(ctx, ec, ch)
return ch
}
// items converts the items in the backing collection into export items.
func items(ctx context.Context, ec exportCollection, ch chan<- export.Item) {
defer close(ch)
errs := fault.New(false)
// For OneDrive, each export collection wraps a single backing
// restore collection.
for item := range ec.backingCollection.Items(ctx, errs) {
itemUUID := item.UUID()
if isMetadataFile(itemUUID, ec.backupVersion) {
continue
}
name, err := getItemName(ctx, itemUUID, ec.backupVersion, ec.backingCollection)
ch <- export.Item{
ID: itemUUID,
Data: export.ItemData{
Name: name,
Body: item.ToReader(),
},
Error: err,
}
}
eitems, erecoverable := errs.ItemsAndRecovered()
// At the end, return all the items that we failed to fetch from kopia.
for _, err := range eitems {
err := err // copy; otherwise every sent pointer aliases the loop variable
ch <- export.Item{
ID: err.ID,
Error: &err,
}
}
for _, e := range erecoverable {
ch <- export.Item{
Error: e,
}
}
}
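On the consumer side, draining an export collection looks roughly like this (a sketch; writeFile is a hypothetical sink):
for item := range ec.Items(ctx) {
	if item.Error != nil {
		// failed items arrive with Error set and, when known, an ID
		log.Printf("export of %q failed: %v", item.ID, item.Error)
		continue
	}
	// item.Data.Name and item.Data.Body carry the exportable file
	writeFile(ec.BasePath(), item.Data.Name, item.Data.Body)
}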
// isMetadataFile determines whether a path corresponds to a metadata
// file. This is OneDrive-specific logic and, unlike
// metadata.IsMetadataFile, which only concerns itself with the current
// version, it depends on the version of the backup.
func isMetadataFile(id string, backupVersion int) bool {
if backupVersion < version.OneDrive1DataAndMetaFiles {
return false
}
return strings.HasSuffix(id, metadata.MetaFileSuffix) ||
strings.HasSuffix(id, metadata.DirMetaFileSuffix)
}
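Concretely, with the .meta/.dirmeta suffix values implied by the tests below, the version gate behaves like this (illustrative ids):
isMetadataFile("report.meta", version.OneDrive1DataAndMetaFiles-1) // false: pre-metadata backup
isMetadataFile("report.meta", version.OneDrive3IsMetaMarker)       // true
isMetadataFile("report.dirmeta", version.OneDrive3IsMetaMarker)    // true
isMetadataFile("report.data", version.OneDrive3IsMetaMarker)       // false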
// getItemName is used to get the name of the item.
// How we get the name depends on the version of the backup.
func getItemName(
ctx context.Context,
id string,
backupVersion int,
fin data.FetchItemByNamer,
) (string, error) {
if backupVersion < version.OneDrive1DataAndMetaFiles {
return id, nil
}
if backupVersion < version.OneDrive5DirMetaNoName {
return strings.TrimSuffix(id, metadata.DataFileSuffix), nil
}
if strings.HasSuffix(id, metadata.DataFileSuffix) {
trimmedName := strings.TrimSuffix(id, metadata.DataFileSuffix)
metaName := trimmedName + metadata.MetaFileSuffix
meta, err := fetchAndReadMetadata(ctx, fin, metaName)
if err != nil {
return "", clues.Wrap(err, "getting metadata").WithClues(ctx)
}
return meta.FileName, nil
}
return "", clues.New("invalid item id").WithClues(ctx)
}
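A usage sketch tying this to the finD test fetcher defined below (ids and names are illustrative):
// current backups: the name comes from the sidecar metadata file
name, _ := getItemName(ctx, "id1.data", version.Backup, finD{id: "id1.meta", name: "name1"})
// name == "name1"
// older backups (before OneDrive5DirMetaNoName): suffix-trim, no fetch needed
name, _ = getItemName(ctx, "file1.data", version.OneDrive4DirIncludesPermissions, nil)
// name == "file1"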
// ProduceExportCollections will create the export collections for the
// given restore collections.
func ProduceExportCollections(
ctx context.Context,
backupVersion int,
exportCfg control.ExportConfig,
opts control.Options,
dcs []data.RestoreCollection,
deets *details.Builder,
errs *fault.Bus,
) ([]export.Collection, error) {
var (
el = errs.Local()
ec = make([]export.Collection, 0, len(dcs))
)
for _, dc := range dcs {
drivePath, err := path.ToDrivePath(dc.FullPath())
if err != nil {
return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx)
}
baseDir := path.Builder{}.Append(drivePath.Folders...)
ec = append(ec, exportCollection{
baseDir: baseDir.String(),
backingCollection: dc,
backupVersion: backupVersion,
})
}
return ec, el.Failure()
}

View File

@ -0,0 +1,463 @@
package onedrive
import (
"bytes"
"context"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/data"
odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)
type ExportUnitSuite struct {
tester.Suite
}
func TestExportUnitSuite(t *testing.T) {
suite.Run(t, &ExportUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *ExportUnitSuite) TestIsMetadataFile() {
table := []struct {
name string
id string
backupVersion int
isMeta bool
}{
{
name: "legacy",
backupVersion: version.OneDrive1DataAndMetaFiles,
isMeta: false,
},
{
name: "metadata file",
backupVersion: version.OneDrive3IsMetaMarker,
id: "name" + metadata.MetaFileSuffix,
isMeta: true,
},
{
name: "dir metadata file",
backupVersion: version.OneDrive3IsMetaMarker,
id: "name" + metadata.DirMetaFileSuffix,
isMeta: true,
},
{
name: "non metadata file",
backupVersion: version.OneDrive3IsMetaMarker,
id: "name" + metadata.DataFileSuffix,
isMeta: false,
},
}
for _, test := range table {
suite.Run(test.name, func() {
assert.Equal(suite.T(), test.isMeta, isMetadataFile(test.id, test.backupVersion), "is metadata")
})
}
}
type metadataStream struct {
id string
name string
}
func (ms metadataStream) ToReader() io.ReadCloser {
return io.NopCloser(bytes.NewBufferString(`{"filename": "` + ms.name + `"}`))
}
func (ms metadataStream) UUID() string { return ms.id }
func (ms metadataStream) Deleted() bool { return false }
type finD struct {
id string
name string
err error
}
func (fd finD) FetchItemByName(ctx context.Context, name string) (data.Stream, error) {
if fd.err != nil {
return nil, fd.err
}
if name == fd.id {
return metadataStream{id: fd.id, name: fd.name}, nil
}
return nil, assert.AnError
}
func (suite *ExportUnitSuite) TestGetItemName() {
table := []struct {
tname string
id string
backupVersion int
name string
fin data.FetchItemByNamer
errFunc assert.ErrorAssertionFunc
}{
{
tname: "legacy",
id: "name",
backupVersion: version.OneDrive1DataAndMetaFiles,
name: "name",
errFunc: assert.NoError,
},
{
tname: "name in filename",
id: "name.data",
backupVersion: version.OneDrive4DirIncludesPermissions,
name: "name",
errFunc: assert.NoError,
},
{
tname: "name in metadata",
id: "id.data",
backupVersion: version.Backup,
name: "name",
fin: finD{id: "id.meta", name: "name"},
errFunc: assert.NoError,
},
{
tname: "name in metadata but error",
id: "id.data",
backupVersion: version.Backup,
name: "",
fin: finD{err: assert.AnError},
errFunc: assert.Error,
},
}
for _, test := range table {
suite.Run(test.tname, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
name, err := getItemName(
ctx,
test.id,
test.backupVersion,
test.fin,
)
test.errFunc(t, err)
assert.Equal(t, test.name, name, "name")
})
}
}
type mockRestoreCollection struct {
path path.Path
items []mockDataStream
}
func (rc mockRestoreCollection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
ch := make(chan data.Stream)
go func() {
defer close(ch)
el := errs.Local()
for _, item := range rc.items {
if item.err != nil {
el.AddRecoverable(ctx, item.err)
continue
}
ch <- item
}
}()
return ch
}
func (rc mockRestoreCollection) FullPath() path.Path {
return rc.path
}
type mockDataStream struct {
id string
data string
err error
}
func (ms mockDataStream) ToReader() io.ReadCloser {
if ms.data != "" {
return io.NopCloser(bytes.NewBufferString(ms.data))
}
return nil
}
func (ms mockDataStream) UUID() string { return ms.id }
func (ms mockDataStream) Deleted() bool { return false }
func (suite *ExportUnitSuite) TestGetItems() {
table := []struct {
name string
version int
backingCollection data.RestoreCollection
expectedItems []export.Item
}{
{
name: "single item",
version: 1,
backingCollection: data.NoFetchRestoreCollection{
Collection: mockRestoreCollection{
items: []mockDataStream{
{id: "name1", data: "body1"},
},
},
},
expectedItems: []export.Item{
{
ID: "name1",
Data: export.ItemData{
Name: "name1",
Body: io.NopCloser(bytes.NewBufferString("body1")),
},
},
},
},
{
name: "multiple items",
version: 1,
backingCollection: data.NoFetchRestoreCollection{
Collection: mockRestoreCollection{
items: []mockDataStream{
{id: "name1", data: "body1"},
{id: "name2", data: "body2"},
},
},
},
expectedItems: []export.Item{
{
ID: "name1",
Data: export.ItemData{
Name: "name1",
Body: io.NopCloser(bytes.NewBufferString("body1")),
},
},
{
ID: "name2",
Data: export.ItemData{
Name: "name2",
Body: io.NopCloser(bytes.NewBufferString("body2")),
},
},
},
},
{
name: "single item with data suffix",
version: 2,
backingCollection: data.NoFetchRestoreCollection{
Collection: mockRestoreCollection{
items: []mockDataStream{
{id: "name1.data", data: "body1"},
},
},
},
expectedItems: []export.Item{
{
ID: "name1.data",
Data: export.ItemData{
Name: "name1",
Body: io.NopCloser(bytes.NewBufferString("body1")),
},
},
},
},
{
name: "single item name from metadata",
version: version.Backup,
backingCollection: data.FetchRestoreCollection{
Collection: mockRestoreCollection{
items: []mockDataStream{
{id: "id1.data", data: "body1"},
},
},
FetchItemByNamer: finD{id: "id1.meta", name: "name1"},
},
expectedItems: []export.Item{
{
ID: "id1.data",
Data: export.ItemData{
Name: "name1",
Body: io.NopCloser(bytes.NewBufferString("body1")),
},
},
},
},
{
name: "single item name from metadata with error",
version: version.Backup,
backingCollection: data.FetchRestoreCollection{
Collection: mockRestoreCollection{
items: []mockDataStream{
{id: "id1.data"},
},
},
FetchItemByNamer: finD{err: assert.AnError},
},
expectedItems: []export.Item{
{
ID: "id1.data",
Error: assert.AnError,
},
},
},
{
name: "items with success and metadata read error",
version: version.Backup,
backingCollection: data.FetchRestoreCollection{
Collection: mockRestoreCollection{
items: []mockDataStream{
{id: "missing.data"},
{id: "id1.data", data: "body1"},
},
},
FetchItemByNamer: finD{id: "id1.meta", name: "name1"},
},
expectedItems: []export.Item{
{
ID: "missing.data",
Error: assert.AnError,
},
{
ID: "id1.data",
Data: export.ItemData{
Name: "name1",
Body: io.NopCloser(bytes.NewBufferString("body1")),
},
},
},
},
{
name: "items with success and fetch error",
version: version.OneDrive1DataAndMetaFiles,
backingCollection: data.FetchRestoreCollection{
Collection: mockRestoreCollection{
items: []mockDataStream{
{id: "name0", data: "body0"},
{id: "name1", err: assert.AnError},
{id: "name2", data: "body2"},
},
},
},
expectedItems: []export.Item{
{
ID: "name0",
Data: export.ItemData{
Name: "name0",
Body: io.NopCloser(bytes.NewBufferString("body0")),
},
},
{
ID: "name2",
Data: export.ItemData{
Name: "name2",
Body: io.NopCloser(bytes.NewBufferString("body2")),
},
},
{
ID: "",
Error: assert.AnError,
},
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
ec := exportCollection{
baseDir: "",
backingCollection: test.backingCollection,
backupVersion: test.version,
}
items := ec.Items(ctx)
fitems := []export.Item{}
for item := range items {
fitems = append(fitems, item)
}
assert.Len(t, fitems, len(test.expectedItems), "num of items")
// We do not have any guarantees about the ordering of the
// items in the SDK, but leave the test this way for now
// to simplify testing.
for i, item := range fitems {
assert.Equal(t, test.expectedItems[i].ID, item.ID, "id")
assert.Equal(t, test.expectedItems[i].Data.Name, item.Data.Name, "name")
assert.Equal(t, test.expectedItems[i].Data.Body, item.Data.Body, "body")
assert.ErrorIs(t, item.Error, test.expectedItems[i].Error)
}
})
}
}
func (suite *ExportUnitSuite) TestExportRestoreCollections() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
dpb := odConsts.DriveFolderPrefixBuilder("driveID1")
p, err := dpb.ToDataLayerOneDrivePath("t", "u", false)
assert.NoError(t, err, "build path")
dcs := []data.RestoreCollection{
data.FetchRestoreCollection{
Collection: mockRestoreCollection{
path: p,
items: []mockDataStream{
{id: "id1.data", data: "body1"},
},
},
FetchItemByNamer: finD{id: "id1.meta", name: "name1"},
},
}
expectedItems := []export.Item{
{
ID: "id1.data",
Data: export.ItemData{
Name: "name1",
Body: io.NopCloser(bytes.NewBufferString("body1")),
},
},
}
exportCfg := control.ExportConfig{}
ecs, err := ProduceExportCollections(ctx, int(version.Backup), exportCfg, control.Options{}, dcs, nil, fault.New(true))
assert.NoError(t, err, "export collections error")
assert.Len(t, ecs, 1, "num of collections")
items := ecs[0].Items(ctx)
fitems := []export.Item{}
for item := range items {
fitems = append(fitems, item)
}
assert.Equal(t, expectedItems, fitems, "items")
}

Some files were not shown because too many files have changed in this diff.