Merge branch 'kiota-bump-jul21' of https://github.com/alcionai/corso into teamsDiscovery

This commit is contained in: b2f0870c27

13  .github/actions/purge-m365-data/action.yml (vendored)
@@ -19,7 +19,9 @@ inputs:
   site:
     description: Sharepoint site where data is to be purged.
   libraries:
-    description: List of library names within site where data is to be purged.
+    description: List of library names within the site where data is to be purged.
+  library-prefix:
+    description: List of library names within the site where the library will get deleted entirely.
   folder-prefix:
     description: Name of the folder to be purged. If falsy, will purge the set of static, well known folders instead.
   older-than:

@@ -51,8 +53,7 @@ runs:
       AZURE_CLIENT_ID: ${{ inputs.azure-client-id }}
       AZURE_CLIENT_SECRET: ${{ inputs.azure-client-secret }}
       AZURE_TENANT_ID: ${{ inputs.azure-tenant-id }}
-    run: |
-      ./exchangePurge.ps1 -User ${{ inputs.user }} -FolderNamePurgeList PersonMetadata -FolderPrefixPurgeList "${{ inputs.folder-prefix }}".Split(",") -PurgeBeforeTimestamp ${{ inputs.older-than }}
+    run: ./exchangePurge.ps1 -User ${{ inputs.user }} -FolderNamePurgeList PersonMetadata -FolderPrefixPurgeList "${{ inputs.folder-prefix }}".Split(",") -PurgeBeforeTimestamp ${{ inputs.older-than }}

  - name: Reset retention for all mailboxes to 0
    if: ${{ inputs.user == '' }}

@@ -61,8 +62,7 @@ runs:
    env:
      M365_TENANT_ADMIN_USER: ${{ inputs.m365-admin-user }}
      M365_TENANT_ADMIN_PASSWORD: ${{ inputs.m365-admin-password }}
-    run: |
-      ./exchangeRetention.ps1
+    run: ./exchangeRetention.ps1

  ################################################################################################################
  # OneDrive

@@ -89,5 +89,4 @@ runs:
    env:
      M365_TENANT_ADMIN_USER: ${{ inputs.m365-admin-user }}
      M365_TENANT_ADMIN_PASSWORD: ${{ inputs.m365-admin-password }}
-    run: |
-      ./onedrivePurge.ps1 -Site ${{ inputs.site }} -LibraryNameList "${{ inputs.libraries }}".split(",") -FolderPrefixPurgeList ${{ inputs.folder-prefix }} -PurgeBeforeTimestamp ${{ inputs.older-than }}
+    run: ./onedrivePurge.ps1 -Site ${{ inputs.site }} -LibraryNameList "${{ inputs.libraries }}".split(",") -FolderPrefixPurgeList ${{ inputs.folder-prefix }} -LibraryPrefixDeleteList ${{ inputs.library-prefix }} -PurgeBeforeTimestamp ${{ inputs.older-than }}
15  .github/workflows/ci_test_cleanup.yml (vendored)
@@ -12,7 +12,7 @@ jobs:
    continue-on-error: true
    strategy:
      matrix:
-        user: [ CORSO_M365_TEST_USER_ID, CORSO_SECONDARY_M365_TEST_USER_ID, EXT_SDK_TEST_USER_ID, '' ]
+        user: [ CORSO_M365_TEST_USER_ID, CORSO_SECONDARY_M365_TEST_USER_ID, '' ]

    steps:
      - uses: actions/checkout@v3

@@ -22,8 +22,7 @@ jobs:
      # At 20 minutes ago, we should be safe from conflicts.
      # The additional 10 minutes is just to be good citizens.
      - name: Set purge boundary
-        run: |
-          echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV
+        run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV

      - name: Purge CI-Produced Folders for Users
        uses: ./.github/actions/purge-m365-data

@@ -43,7 +42,7 @@ jobs:
    continue-on-error: true
    strategy:
      matrix:
-        site: [ CORSO_M365_TEST_SITE_URL, EXT_SDK_TEST_SITE_URL ]
+        site: [ CORSO_M365_TEST_SITE_URL ]

    steps:
      - uses: actions/checkout@v3

@@ -53,15 +52,15 @@ jobs:
      # At 20 minutes ago, we should be safe from conflicts.
      # The additional 10 minutes is just to be good citizens.
      - name: Set purge boundary
-        run: |
-          echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV
+        run: echo "HALF_HOUR_AGO=$(date -d '30 minutes ago' -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_ENV

      - name: Purge CI-Produced Folders for Sites
        uses: ./.github/actions/purge-m365-data
        with:
          site: ${{ vars[matrix.site] }}
-          folder-prefix: ${{ vars.CORSO_M365_TEST_PREFIXES }}
-          libraries: ${{ vars.CORSO_M365_TEST_SITE_LIBRARIES }}
+          folder-prefix: ${{ vars.CORSO_M365_TEST_PREFIXES }}
+          libraries: ${{ vars.CORSO_M365_TEST_SITE_LIBRARIES }}
+          library-prefix: ${{ vars.CORSO_M365_TEST_PREFIXES }}
          older-than: ${{ env.HALF_HOUR_AGO }}
          azure-client-id: ${{ secrets.CLIENT_ID }}
          azure-client-secret: ${{ secrets.CLIENT_SECRET }}
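The purge boundary above is computed in shell with `date -d '30 minutes ago'`. For reference, a minimal Go sketch of the same computation — the function name mirrors nothing in the repo and is purely illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// purgeBoundary returns a UTC timestamp the given duration in the past,
// formatted the same way as the workflow's `date -u +"%Y-%m-%dT%H:%M:%SZ"`.
func purgeBoundary(ago time.Duration) string {
	return time.Now().Add(-ago).UTC().Format("2006-01-02T15:04:05Z")
}

func main() {
	// Matches the HALF_HOUR_AGO value the workflow exports to $GITHUB_ENV.
	fmt.Println("HALF_HOUR_AGO=" + purgeBoundary(30*time.Minute))
}
```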
CHANGELOG.md

@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased] (beta)

+### Fixed
+- SharePoint document libraries deleted after the last backup can now be restored.

## [v0.11.1] (beta) - 2023-07-20

### Fixed
- Allow repo connect to succeed when a `corso.toml` file was not provided but configuration is specified using environment variables and flags.

@@ -21,6 +26,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
- Return a ServiceNotEnabled error when a tenant has no active SharePoint license.
- Added retries for http/2 stream connection failures when downloading large item content.
+- SharePoint document libraries that were deleted after the last backup can now be restored.

### Known issues
- If a link share is created for an item with inheritance disabled

@@ -328,7 +334,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Miscellaneous
  - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))

-[Unreleased]: https://github.com/alcionai/corso/compare/v0.11.0...HEAD
+[Unreleased]: https://github.com/alcionai/corso/compare/v0.11.1...HEAD
+[v0.11.1]: https://github.com/alcionai/corso/compare/v0.11.0...v0.11.1
[v0.11.0]: https://github.com/alcionai/corso/compare/v0.10.0...v0.11.0
[v0.10.0]: https://github.com/alcionai/corso/compare/v0.9.0...v0.10.0
[v0.9.0]: https://github.com/alcionai/corso/compare/v0.8.1...v0.9.0
onedrivePurge.ps1

@@ -19,14 +19,17 @@ Param (
    [datetime]$PurgeBeforeTimestamp,

    [Parameter(Mandatory = $True, HelpMessage = "Purge folders with this prefix")]
-    [String[]]$FolderPrefixPurgeList
+    [String[]]$FolderPrefixPurgeList,
+
+    [Parameter(Mandatory = $False, HelpMessage = "Delete document libraries with this prefix")]
+    [String[]]$LibraryPrefixDeleteList = @()
)

Set-StrictMode -Version 2.0
# Attempt to set network timeout to 10min
[System.Net.ServicePointManager]::MaxServicePointIdleTime = 600000

-function Get-TimestampFromName {
+function Get-TimestampFromFolderName {
    param (
        [Parameter(Mandatory = $True, HelpMessage = "Folder ")]
        [Microsoft.SharePoint.Client.Folder]$folder
@@ -54,6 +57,36 @@ function Get-TimestampFromName {

    return $timestamp
}

+function Get-TimestampFromListName {
+    param (
+        [Parameter(Mandatory = $True, HelpMessage = "List ")]
+        [Microsoft.SharePoint.Client.List]$list
+    )
+
+    $name = $list.Title
+
+    # fall back on the list's last-modified time
+    [datetime]$timestamp = $list.LastItemUserModifiedDate
+
+    try {
+        # Assumes that the timestamp is at the end and starts with yyyy-mm-ddT and is ISO8601
+        if ($name -imatch "(\d{4}-\d{2}-\d{2}T.*)") {
+            $timestamp = [System.Convert]::ToDatetime($Matches.0)
+        }
+
+        # Assumes that the timestamp is at the end and starts with dd-MMM-yyyy_HH-MM-SS
+        if ($name -imatch "(\d{2}-[a-zA-Z]{3}-\d{4}_\d{2}-\d{2}-\d{2})") {
+            $timestamp = [datetime]::ParseExact($Matches.0, "dd-MMM-yyyy_HH-mm-ss", [CultureInfo]::InvariantCulture, "AssumeUniversal")
+        }
+    }
+    catch {}
+
+    Write-Verbose "List: $name, create timestamp: $timestamp"
+
+    return $timestamp
+}
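The two name formats matched above (an embedded ISO-8601 timestamp and a `dd-MMM-yyyy_HH-MM-SS` suffix) are easy to exercise outside PowerShell. A minimal Go sketch of the same try-then-fall-back logic — the function name and sample inputs are illustrative, not part of the commit:

```go
package main

import (
	"fmt"
	"regexp"
	"time"
)

var (
	isoRE    = regexp.MustCompile(`\d{4}-\d{2}-\d{2}T.*`)
	legacyRE = regexp.MustCompile(`\d{2}-[A-Za-z]{3}-\d{4}_\d{2}-\d{2}-\d{2}`)
)

// timestampFromName mirrors Get-TimestampFromListName: try the embedded
// timestamp formats in turn, else return a caller-provided fallback.
func timestampFromName(name string, fallback time.Time) time.Time {
	if m := isoRE.FindString(name); m != "" {
		if ts, err := time.Parse(time.RFC3339, m); err == nil {
			return ts
		}
	}

	if m := legacyRE.FindString(name); m != "" {
		if ts, err := time.Parse("02-Jan-2006_15-04-05", m); err == nil {
			return ts.UTC()
		}
	}

	return fallback
}

func main() {
	def := time.Unix(0, 0).UTC()
	fmt.Println(timestampFromName("ci-backup-2023-07-21T10:30:00Z", def))
	fmt.Println(timestampFromName("ci-backup-21-Jul-2023_10-30-00", def))
}
```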
function Purge-Library {
    [CmdletBinding(SupportsShouldProcess)]
    Param (

@@ -77,7 +110,7 @@ function Purge-Library {

    foreach ($f in $folders) {
        $folderName = $f.Name
-        $createTime = Get-TimestampFromName -Folder $f
+        $createTime = Get-TimestampFromFolderName -Folder $f

        if ($PurgeBeforeTimestamp -gt $createTime) {
            foreach ($p in $FolderPrefixPurgeList) {

@@ -97,7 +130,7 @@ function Purge-Library {
        if ($f.ServerRelativeUrl -imatch "$SiteSuffix/{0,1}(.+?)/{0,1}$folderName$") {
            $siteRelativeParentPath = $Matches.1
        }

        if ($PSCmdlet.ShouldProcess("Name: " + $f.Name + " Parent: " + $siteRelativeParentPath, "Remove folder")) {
            Write-Host "Deleting folder: "$f.Name" with parent: $siteRelativeParentPath"
            try {
@@ -110,6 +143,54 @@ function Purge-Library {
    }
}

+function Delete-LibraryByPrefix {
+    [CmdletBinding(SupportsShouldProcess)]
+    Param (
+        [Parameter(Mandatory = $True, HelpMessage = "Document library root")]
+        [String]$LibraryNamePrefix,
+
+        [Parameter(Mandatory = $True, HelpMessage = "Purge folders before this date time (UTC)")]
+        [datetime]$PurgeBeforeTimestamp,
+
+        [Parameter(Mandatory = $True, HelpMessage = "Site suffix")]
+        [String[]]$SiteSuffix
+    )
+
+    Write-Host "`nDeleting library: $LibraryNamePrefix"
+
+    $listsToDelete = @()
+    $lists = Get-PnPList
+
+    foreach ($l in $lists) {
+        $listName = $l.Title
+        $createTime = Get-TimestampFromListName -List $l
+
+        if ($PurgeBeforeTimestamp -gt $createTime) {
+            if ($listName -like "$LibraryNamePrefix*") {
+                $listsToDelete += $l
+            }
+        }
+    }
+
+    Write-Host "Found"$listsToDelete.count"lists to delete"
+
+    foreach ($l in $listsToDelete) {
+        $listName = $l.Title
+
+        if ($PSCmdlet.ShouldProcess("Name: " + $l.Title, "Remove list")) {
+            Write-Host "Deleting list: "$l.Title
+            try {
+                Remove-PnPList -Identity $l.Id -Force
+            }
+            catch [ System.Management.Automation.ItemNotFoundException ] {
+                Write-Host "List: "$l.Title" is already deleted. Skipping..."
+            }
+        }
+    }
+}
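Delete-LibraryByPrefix selects lists with two gates — a name prefix and an age cutoff — before destructively removing them. The same selection, sketched in Go with hypothetical types standing in for the SharePoint list metadata:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// list is a stand-in for the SharePoint list metadata used above.
type list struct {
	Title   string
	Created time.Time
}

// selectForDeletion keeps only lists that both match the prefix and
// predate the purge boundary; everything else is left untouched.
func selectForDeletion(lists []list, prefix string, purgeBefore time.Time) []list {
	var out []list

	for _, l := range lists {
		if strings.HasPrefix(l.Title, prefix) && l.Created.Before(purgeBefore) {
			out = append(out, l)
		}
	}

	return out
}

func main() {
	cutoff := time.Now().Add(-30 * time.Minute)
	lists := []list{
		{Title: "ci-lib-old", Created: time.Now().Add(-2 * time.Hour)},
		{Title: "ci-lib-new", Created: time.Now()},
		{Title: "prod-lib", Created: time.Now().Add(-2 * time.Hour)},
	}

	fmt.Println(selectForDeletion(lists, "ci-", cutoff)) // only ci-lib-old
}
```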
######## MAIN #########

# Setup SharePointPnP

@@ -176,4 +257,8 @@ $FolderPrefixPurgeList = $FolderPrefixPurgeList | ForEach-Object { @($_.Split(',

    foreach ($library in $LibraryNameList) {
        Purge-Library -LibraryName $library -PurgeBeforeTimestamp $PurgeBeforeTimestamp -FolderPrefixPurgeList $FolderPrefixPurgeList -SiteSuffix $siteSuffix
    }
}

+foreach ($libraryPfx in $LibraryPrefixDeleteList) {
+    Delete-LibraryByPrefix -LibraryNamePrefix $libraryPfx -PurgeBeforeTimestamp $PurgeBeforeTimestamp -SiteSuffix $siteSuffix
+}
15  src/go.mod
@@ -5,21 +5,21 @@ go 1.20
replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377

require (
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
	github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a
	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.44.302
+	github.com/aws/aws-sdk-go v1.44.305
	github.com/aws/aws-xray-sdk-go v1.8.1
	github.com/cenkalti/backoff/v4 v4.2.1
	github.com/google/uuid v1.3.0
	github.com/h2non/gock v1.2.0
	github.com/kopia/kopia v0.12.2-0.20230327171220-747baeebdab1
-	github.com/microsoft/kiota-abstractions-go v1.0.0
+	github.com/microsoft/kiota-abstractions-go v1.1.0
	github.com/microsoft/kiota-authentication-azure-go v1.0.0
	github.com/microsoft/kiota-http-go v1.0.0
	github.com/microsoft/kiota-serialization-form-go v1.0.0
-	github.com/microsoft/kiota-serialization-json-go v1.0.2
-	github.com/microsoftgraph/msgraph-sdk-go v1.4.0
+	github.com/microsoft/kiota-serialization-json-go v1.0.4
+	github.com/microsoftgraph/msgraph-sdk-go v1.12.0
	github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
	github.com/pkg/errors v0.9.1
	github.com/puzpuzpuz/xsync/v2 v2.4.1

@@ -42,7 +42,6 @@ require (
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
	github.com/andybalholm/brotli v1.0.4 // indirect
-	github.com/dnaeon/go-vcr v1.2.0 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/gofrs/flock v0.8.1 // indirect
	github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect

@@ -62,9 +61,9 @@ require (
)

require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
30  src/go.sum
@@ -36,14 +36,14 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=

@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk=
-github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.305 h1:fU/5lY3WyBjGU9fkmQYd8o4fZu+2RaOv/i+sPaJVvFg=
+github.com/aws/aws-sdk-go v1.44.305/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=

@@ -102,7 +102,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=

@@ -272,20 +271,20 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/microsoft/kiota-abstractions-go v1.0.0 h1:teQS3yOmcTyps+O48AD17LI8TR1B3wCEwGFcwC6K75c=
-github.com/microsoft/kiota-abstractions-go v1.0.0/go.mod h1:2yaRQnx2KU7UaenYSApiTT4pf7fFkPV0B71Rm2uYynQ=
+github.com/microsoft/kiota-abstractions-go v1.1.0 h1:X1aKlsYCRs/0RSChr/fbq4j/+kxRzbSY5GeWhtHQNYI=
+github.com/microsoft/kiota-abstractions-go v1.1.0/go.mod h1:RkxyZ5x87Njik7iVeQY9M2wtrrL1MJZcXiI/BxD/82g=
github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk=
github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw=
github.com/microsoft/kiota-http-go v1.0.0 h1:F1hd6gMlLeEgH2CkRB7z13ow7LxMKMWEmms/t0VfS+k=
github.com/microsoft/kiota-http-go v1.0.0/go.mod h1:eujxJliqodotsYepIc6ihhK+vXMMt5Q8YiSNL7+7M7U=
github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI=
github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA=
-github.com/microsoft/kiota-serialization-json-go v1.0.2 h1:RXan8v7yWBD88XxVZ2W38BBcqu2UqWtgS54nCbOS5ow=
-github.com/microsoft/kiota-serialization-json-go v1.0.2/go.mod h1:AUItT9exyxmjZQE8IeFD9ygP77q9GKVb+AQE2V5Ikho=
+github.com/microsoft/kiota-serialization-json-go v1.0.4 h1:5TaISWwd2Me8clrK7SqNATo0tv9seOq59y4I5953egQ=
+github.com/microsoft/kiota-serialization-json-go v1.0.4/go.mod h1:rM4+FsAY+9AEpBsBzkFFis+b/LZLlNKKewuLwK9Q6Mg=
github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
-github.com/microsoftgraph/msgraph-sdk-go v1.4.0 h1:ibNwMDEZ6HikA9BVXu+TljCzCiE+yFsD6wLpJbTc1tc=
-github.com/microsoftgraph/msgraph-sdk-go v1.4.0/go.mod h1:JIDL1xENx92B60NjO2ACyqGeKvtYkdl9rirgajIgryw=
+github.com/microsoftgraph/msgraph-sdk-go v1.12.0 h1:/jZJ1KCtVlvxStKq31VsEPOQQ5Iy26R1pgvc+RYt7XI=
+github.com/microsoftgraph/msgraph-sdk-go v1.12.0/go.mod h1:ccLv84FJFtwdSzYWM/HlTes5FLzkzzBsYh9kg93/WS8=
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY=
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=

@@ -303,7 +302,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
@@ -40,6 +40,11 @@ type Cacher interface {
	ProviderForName(id string) Provider
}

+type CacheBuilder interface {
+	Add(id, name string)
+	Cacher
+}
+
var _ Cacher = &cache{}

type cache struct {

@@ -47,17 +52,29 @@ type cache struct {
	nameToID map[string]string
}

-func NewCache(idToName map[string]string) cache {
-	nti := make(map[string]string, len(idToName))
-
-	for id, name := range idToName {
-		nti[name] = id
-	}
-
-	return cache{
-		idToName: idToName,
-		nameToID: nti,
-	}
-}
+func NewCache(idToName map[string]string) *cache {
+	c := cache{
+		idToName: map[string]string{},
+		nameToID: map[string]string{},
+	}
+
+	if len(idToName) > 0 {
+		nti := make(map[string]string, len(idToName))
+
+		for id, name := range idToName {
+			nti[name] = id
+		}
+
+		c.idToName = idToName
+		c.nameToID = nti
+	}
+
+	return &c
+}
+
+func (c *cache) Add(id, name string) {
+	c.idToName[strings.ToLower(id)] = name
+	c.nameToID[strings.ToLower(name)] = id
+}

// IDOf returns the id associated with the given name.
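The cache above pairs an id→name map with a derived name→id map, and the patch makes it growable via Add while tolerating a nil seed. A standalone sketch of the same bidirectional pattern — this is not the corso type, just an illustration of the shape:

```go
package main

import (
	"fmt"
	"strings"
)

// biCache is a minimal stand-in for the idname cache: both directions
// stay in sync, and a nil seed map is fine.
type biCache struct {
	idToName map[string]string
	nameToID map[string]string
}

func newBiCache(seed map[string]string) *biCache {
	c := &biCache{
		idToName: map[string]string{},
		nameToID: map[string]string{},
	}

	// Seeding is optional; newBiCache(nil) yields an empty, usable cache,
	// mirroring NewCache(nil) in the diff above.
	for id, name := range seed {
		c.Add(id, name)
	}

	return c
}

// Add lowercases keys, as the patched Add method does, making lookups
// case-insensitive on the key side.
func (c *biCache) Add(id, name string) {
	c.idToName[strings.ToLower(id)] = name
	c.nameToID[strings.ToLower(name)] = id
}

func (c *biCache) NameOf(id string) (string, bool) {
	n, ok := c.idToName[strings.ToLower(id)]
	return n, ok
}

func main() {
	c := newBiCache(nil)
	c.Add("drive-1", "Documents")
	fmt.Println(c.NameOf("DRIVE-1")) // Documents true
}
```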
@@ -1,12 +1,24 @@
package mock

import (
	"context"
	"io"
	"time"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

// ---------------------------------------------------------------------------
// stream
// ---------------------------------------------------------------------------

var _ data.Stream = &Stream{}

type Stream struct {
	ID     string
	Reader io.ReadCloser

@@ -52,3 +64,39 @@ type errReader struct {
func (er errReader) Read([]byte) (int, error) {
	return 0, er.readErr
}

+// ---------------------------------------------------------------------------
+// collection
+// ---------------------------------------------------------------------------
+
+var (
+	_ data.Collection        = &Collection{}
+	_ data.BackupCollection  = &Collection{}
+	_ data.RestoreCollection = &Collection{}
+)
+
+type Collection struct{}
+
+func (c Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
+	return nil
+}
+
+func (c Collection) FullPath() path.Path {
+	return nil
+}
+
+func (c Collection) PreviousPath() path.Path {
+	return nil
+}
+
+func (c Collection) State() data.CollectionState {
+	return data.NewState
+}
+
+func (c Collection) DoNotMergeItems() bool {
+	return true
+}
+
+func (c Collection) FetchItemByName(ctx context.Context, name string) (data.Stream, error) {
+	return &Stream{}, clues.New("not implemented")
+}
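The mock above leans on `var _ data.Collection = &Collection{}` style declarations to prove, at compile time, that the stub still satisfies the interfaces. A self-contained sketch of that idiom, with toy interfaces standing in for corso's:

```go
package main

import "fmt"

// Toy stand-ins for the data.Collection family of interfaces.
type Collection interface{ FullPath() string }

type BackupCollection interface {
	Collection
	DoNotMergeItems() bool
}

// mockCollection implements both interfaces with canned answers.
type mockCollection struct{}

func (mockCollection) FullPath() string      { return "" }
func (mockCollection) DoNotMergeItems() bool { return true }

// Compile-time proof that the mock satisfies each interface; if a method
// is removed or its signature drifts, the build breaks right here rather
// than deep inside a test.
var (
	_ Collection       = mockCollection{}
	_ BackupCollection = mockCollection{}
)

func main() {
	var bc BackupCollection = mockCollection{}
	fmt.Println(bc.DoNotMergeItems())
}
```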
@@ -14,6 +14,7 @@ import (
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/services/m365/api"

@@ -47,6 +48,11 @@ type Controller struct {
	// mutex used to synchronize updates to `status`
	mu     sync.Mutex
	status support.ControllerOperationStatus // contains the status of the last run status

+	// backupDriveIDNames is populated on restore. It maps the backup's
+	// drive names to their id. Primarily for use when creating or looking
+	// up a new drive.
+	backupDriveIDNames idname.CacheBuilder
}

func NewController(

@@ -142,6 +148,20 @@ func (ctrl *Controller) incrementAwaitingMessages() {
	ctrl.wg.Add(1)
}

+func (ctrl *Controller) CacheItemInfo(dii details.ItemInfo) {
+	if ctrl.backupDriveIDNames == nil {
+		ctrl.backupDriveIDNames = idname.NewCache(map[string]string{})
+	}
+
+	if dii.SharePoint != nil {
+		ctrl.backupDriveIDNames.Add(dii.SharePoint.DriveID, dii.SharePoint.DriveName)
+	}
+
+	if dii.OneDrive != nil {
+		ctrl.backupDriveIDNames.Add(dii.OneDrive.DriveID, dii.OneDrive.DriveName)
+	}
+}

// ---------------------------------------------------------------------------
// Resource Lookup Handling
// ---------------------------------------------------------------------------
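CacheItemInfo lazily constructs the cache before first use, so callers can never hit a nil map. The same guard pattern in a compact, hypothetical form:

```go
package main

import "fmt"

type recorder struct {
	seen map[string]string
}

// record initializes storage on first use, mirroring the
// `if ctrl.backupDriveIDNames == nil` guard in CacheItemInfo above.
func (r *recorder) record(id, name string) {
	if r.seen == nil {
		r.seen = map[string]string{}
	}

	r.seen[id] = name
}

func main() {
	var r recorder // zero value; no constructor required
	r.record("drive-id", "drive-name")
	fmt.Println(r.seen["drive-id"])
}
```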
@@ -12,8 +12,10 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

+	"github.com/alcionai/corso/src/internal/common/idname"
	inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
	"github.com/alcionai/corso/src/internal/data"
+	dataMock "github.com/alcionai/corso/src/internal/data/mock"
	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
	"github.com/alcionai/corso/src/internal/m365/mock"
	"github.com/alcionai/corso/src/internal/m365/resource"

@@ -22,6 +24,7 @@ import (
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/tester/tconfig"
	"github.com/alcionai/corso/src/internal/version"
+	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/control/testdata"
	"github.com/alcionai/corso/src/pkg/count"

@@ -260,6 +263,82 @@ func (suite *ControllerUnitSuite) TestController_Wait() {
	assert.Equal(t, int64(4), result.Bytes)
}

+func (suite *ControllerUnitSuite) TestController_CacheItemInfo() {
+	var (
+		odid   = "od-id"
+		odname = "od-name"
+		spid   = "sp-id"
+		spname = "sp-name"
+		// intentionally declared outside the test loop
+		ctrl = &Controller{
+			wg:                 &sync.WaitGroup{},
+			region:             &trace.Region{},
+			backupDriveIDNames: idname.NewCache(nil),
+		}
+	)
+
+	table := []struct {
+		name       string
+		service    path.ServiceType
+		cat        path.CategoryType
+		dii        details.ItemInfo
+		expectID   string
+		expectName string
+	}{
+		{
+			name: "exchange",
+			dii: details.ItemInfo{
+				Exchange: &details.ExchangeInfo{},
+			},
+			expectID:   "",
+			expectName: "",
+		},
+		{
+			name: "folder",
+			dii: details.ItemInfo{
+				Folder: &details.FolderInfo{},
+			},
+			expectID:   "",
+			expectName: "",
+		},
+		{
+			name: "onedrive",
+			dii: details.ItemInfo{
+				OneDrive: &details.OneDriveInfo{
+					DriveID:   odid,
+					DriveName: odname,
+				},
+			},
+			expectID:   odid,
+			expectName: odname,
+		},
+		{
+			name: "sharepoint",
+			dii: details.ItemInfo{
+				SharePoint: &details.SharePointInfo{
+					DriveID:   spid,
+					DriveName: spname,
+				},
+			},
+			expectID:   spid,
+			expectName: spname,
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			ctrl.CacheItemInfo(test.dii)
+
+			name, _ := ctrl.backupDriveIDNames.NameOf(test.expectID)
+			assert.Equal(t, test.expectName, name)
+
+			id, _ := ctrl.backupDriveIDNames.IDOf(test.expectName)
+			assert.Equal(t, test.expectID, id)
+		})
+	}
+}

// ---------------------------------------------------------------------------
// Integration tests
// ---------------------------------------------------------------------------

@@ -315,7 +394,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreFailsBadService() {
		RestorePermissions: true,
		ToggleFeatures:     control.Toggles{},
	},
-	nil,
+	[]data.RestoreCollection{&dataMock.Collection{}},
	fault.New(true),
	count.New())
assert.Error(t, err, clues.ToCore(err))

@@ -397,13 +476,8 @@ func (suite *ControllerIntegrationSuite) TestEmptyCollections() {
		test.col,
		fault.New(true),
		count.New())
-	require.NoError(t, err, clues.ToCore(err))
-	assert.NotNil(t, deets)
-
-	stats := suite.ctrl.Wait()
-	assert.Zero(t, stats.Objects)
-	assert.Zero(t, stats.Folders)
-	assert.Zero(t, stats.Successes)
+	require.Error(t, err, clues.ToCore(err))
+	assert.Nil(t, deets)
	})
	}
}
@@ -271,7 +271,9 @@ func Wrap(ctx context.Context, e error, msg string) *clues.Err {
		e = clues.Stack(e, clues.New(mainMsg))
	}

-	return setLabels(clues.Wrap(e, msg).WithClues(ctx).With(data...), innerMsg)
+	ce := clues.Wrap(e, msg).WithClues(ctx).With(data...).WithTrace(1)
+
+	return setLabels(ce, innerMsg)
}

// Stack is a helper function that extracts ODataError metadata from

@@ -292,7 +294,9 @@ func Stack(ctx context.Context, e error) *clues.Err {
		e = clues.Stack(e, clues.New(mainMsg))
	}

-	return setLabels(clues.Stack(e).WithClues(ctx).With(data...), innerMsg)
+	ce := clues.Stack(e).WithClues(ctx).With(data...).WithTrace(1)
+
+	return setLabels(ce, innerMsg)
}

// stackReq is a helper function that extracts ODataError metadata from

@@ -361,7 +365,7 @@ func errData(err odataerrors.ODataErrorable) (string, []any, string) {
		msgConcat += ptr.Val(d.GetMessage())
	}

-	inner := mainErr.GetInnererror()
+	inner := mainErr.GetInnerError()
	if inner != nil {
		data = appendIf(data, "odataerror_inner_cli_req_id", inner.GetClientRequestId())
		data = appendIf(data, "odataerror_inner_req_id", inner.GetRequestId())
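Switching to `WithTrace(1)` makes the recorded stack frame point at the caller of Wrap/Stack rather than at the helper itself. A dependency-free sketch of why the skip count matters, using runtime.Caller (clues' internals will differ; the names here are illustrative):

```go
package main

import (
	"fmt"
	"runtime"
)

// callerAt reports the file:line `skip` frames above this function —
// the same knob a WithTrace(n)-style API exposes.
func callerAt(skip int) string {
	_, file, line, ok := runtime.Caller(skip + 1)
	if !ok {
		return "unknown"
	}

	return fmt.Sprintf("%s:%d", file, line)
}

// wrapErr is a stand-in for the graph.Wrap helper: with skip 0 the
// reported location would always be this helper; skip 1 attributes the
// error to whoever called wrapErr, which is what readers actually want.
func wrapErr(msg string) string {
	return msg + " @ " + callerAt(1)
}

func main() {
	fmt.Println(wrapErr("getting drive root id"))
}
```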
@@ -69,3 +69,5 @@ func (ctrl Controller) ConsumeRestoreCollections(
) (*details.Details, error) {
	return ctrl.Deets, ctrl.Err
}

+func (ctrl Controller) CacheItemInfo(dii details.ItemInfo) {}
@@ -35,6 +35,7 @@ type BackupHandler interface {
	api.Getter
	GetItemPermissioner
	GetItemer
+	NewDrivePagerer

	// PathPrefix constructs the service and category specific path prefix for
	// the given values.

@@ -49,7 +50,6 @@ type BackupHandler interface {

	// ServiceCat returns the service and category used by this implementation.
	ServiceCat() (path.ServiceType, path.CategoryType)
-	NewDrivePager(resourceOwner string, fields []string) api.DrivePager
	NewItemPager(driveID, link string, fields []string) api.DriveItemDeltaEnumerator
	// FormatDisplayPath creates a human-readable string to represent the
	// provided path.

@@ -61,6 +61,10 @@ type BackupHandler interface {
	IncludesDir(dir string) bool
}

+type NewDrivePagerer interface {
+	NewDrivePager(resourceOwner string, fields []string) api.DrivePager
+}
+
type GetItemPermissioner interface {
	GetItemPermission(
		ctx context.Context,

@@ -86,7 +90,9 @@ type RestoreHandler interface {
	GetItemsByCollisionKeyser
	GetRootFolderer
	ItemInfoAugmenter
+	NewDrivePagerer
	NewItemContentUploader
+	PostDriver
	PostItemInContainerer
	DeleteItemPermissioner
	UpdateItemPermissioner

@@ -145,6 +151,13 @@ type UpdateItemLinkSharer interface {
	) (models.Permissionable, error)
}

+type PostDriver interface {
+	PostDrive(
+		ctx context.Context,
+		protectedResourceID, driveName string,
+	) (models.Driveable, error)
+}
+
type PostItemInContainerer interface {
	PostItemInContainer(
		ctx context.Context,
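The handler interfaces are being split into one-method capabilities (NewDrivePagerer, PostDriver, ...) that compose into the larger BackupHandler/RestoreHandler. A toy version of that segregation — all names here are illustrative — showing why test doubles then only need the slice they actually use:

```go
package main

import "fmt"

// Narrow, single-capability interfaces, in the style of PostDriver
// and NewDrivePagerer above.
type driveCreator interface {
	CreateDrive(owner, name string) (string, error)
}

type rootGetter interface {
	RootFolderID(driveID string) (string, error)
}

// A consumer states exactly what it needs by composing capabilities.
type creatorAndGetter interface {
	driveCreator
	rootGetter
}

// fakeAPI is a test double; it only has to implement the two methods
// the consumer composes, not a sprawling handler interface.
type fakeAPI struct{}

func (fakeAPI) CreateDrive(owner, name string) (string, error) { return "drive-1", nil }
func (fakeAPI) RootFolderID(driveID string) (string, error)    { return "root-1", nil }

func ensure(cg creatorAndGetter) (string, error) {
	id, err := cg.CreateDrive("owner", "restored")
	if err != nil {
		return "", err
	}

	return cg.RootFolderID(id)
}

func main() {
	root, _ := ensure(fakeAPI{})
	fmt.Println(root)
}
```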
@@ -361,8 +361,8 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
		Folders: folderElements,
	}

-	caches := NewRestoreCaches()
-	caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId())
+	caches := NewRestoreCaches(nil)
+	caches.DriveIDToDriveInfo[driveID] = driveInfo{rootFolderID: ptr.Val(rootFolder.GetId())}

	rh := NewRestoreHandler(suite.ac)
@@ -5,6 +5,7 @@ import (
	"net/http"
	"strings"

+	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/drives"
	"github.com/microsoftgraph/msgraph-sdk-go/models"

@@ -133,6 +134,19 @@ func NewRestoreHandler(ac api.Client) *itemRestoreHandler {
	return &itemRestoreHandler{ac.Drives()}
}

+func (h itemRestoreHandler) PostDrive(
+	context.Context,
+	string, string,
+) (models.Driveable, error) {
+	return nil, clues.New("creating drives in oneDrive is not supported")
+}
+
+func (h itemRestoreHandler) NewDrivePager(
+	resourceOwner string, fields []string,
+) api.DrivePager {
+	return h.ac.NewUserDrivePager(resourceOwner, fields)
+}

// AugmentItemInfo will populate a details.OneDriveInfo struct
// with properties from the drive item. ItemSize is specified
// separately for restore processes because the local itemable
@@ -249,9 +249,25 @@ type RestoreHandler struct {
	PostItemResp models.DriveItemable
	PostItemErr  error

+	DrivePagerV api.DrivePager
+
+	PostDriveResp models.Driveable
+	PostDriveErr  error
+
	UploadSessionErr error
}

+func (h RestoreHandler) PostDrive(
+	ctx context.Context,
+	protectedResourceID, driveName string,
+) (models.Driveable, error) {
+	return h.PostDriveResp, h.PostDriveErr
+}
+
+func (h RestoreHandler) NewDrivePager(string, []string) api.DrivePager {
+	return h.DrivePagerV
+}
+
func (h *RestoreHandler) AugmentItemInfo(
	details.ItemInfo,
	models.DriveItemable,
@@ -15,6 +15,7 @@ import (
	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/pkg/errors"

+	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/diagnostics"

@@ -37,9 +38,17 @@ const (
	maxUploadRetries = 3
)

+type driveInfo struct {
+	id           string
+	name         string
+	rootFolderID string
+}
+
type restoreCaches struct {
+	BackupDriveIDName    idname.Cacher
	collisionKeyToItemID map[string]api.DriveItemIDType
-	DriveIDToRootFolderID map[string]string
+	DriveIDToDriveInfo   map[string]driveInfo
+	DriveNameToDriveInfo map[string]driveInfo
	Folders               *folderCache
	OldLinkShareIDToNewID map[string]string
	OldPermIDToNewID      map[string]string
@@ -48,10 +57,74 @@ type restoreCaches struct {
	pool sync.Pool
}

-func NewRestoreCaches() *restoreCaches {
+func (rc *restoreCaches) AddDrive(
+	ctx context.Context,
+	md models.Driveable,
+	grf GetRootFolderer,
+) error {
+	di := driveInfo{
+		id:   ptr.Val(md.GetId()),
+		name: ptr.Val(md.GetName()),
+	}
+
+	ctx = clues.Add(ctx, "drive_info", di)
+
+	root, err := grf.GetRootFolder(ctx, di.id)
+	if err != nil {
+		return clues.Wrap(err, "getting drive root id")
+	}
+
+	di.rootFolderID = ptr.Val(root.GetId())
+
+	rc.DriveIDToDriveInfo[di.id] = di
+	rc.DriveNameToDriveInfo[di.name] = di
+
+	return nil
+}
+
+// Populate looks up drive items available to the protectedResource
+// and adds their info to the caches.
+func (rc *restoreCaches) Populate(
+	ctx context.Context,
+	gdparf GetDrivePagerAndRootFolderer,
+	protectedResourceID string,
+) error {
+	drives, err := api.GetAllDrives(
+		ctx,
+		gdparf.NewDrivePager(protectedResourceID, nil),
+		true,
+		maxDrivesRetries)
+	if err != nil {
+		return clues.Wrap(err, "getting drives")
+	}
+
+	for _, md := range drives {
+		if err := rc.AddDrive(ctx, md, gdparf); err != nil {
+			return clues.Wrap(err, "caching drive")
+		}
+	}
+
+	return nil
+}
+
+type GetDrivePagerAndRootFolderer interface {
+	GetRootFolderer
+	NewDrivePagerer
+}
+
+func NewRestoreCaches(
+	backupDriveIDNames idname.Cacher,
+) *restoreCaches {
+	// avoid nil panics
+	if backupDriveIDNames == nil {
+		backupDriveIDNames = idname.NewCache(nil)
+	}
+
	return &restoreCaches{
+		BackupDriveIDName:    backupDriveIDNames,
		collisionKeyToItemID: map[string]api.DriveItemIDType{},
-		DriveIDToRootFolderID: map[string]string{},
+		DriveIDToDriveInfo:   map[string]driveInfo{},
+		DriveNameToDriveInfo: map[string]driveInfo{},
		Folders:               NewFolderCache(),
		OldLinkShareIDToNewID: map[string]string{},
		OldPermIDToNewID:      map[string]string{},
@@ -73,19 +146,27 @@ func ConsumeRestoreCollections(
	backupVersion int,
	restoreCfg control.RestoreConfig,
	opts control.Options,
+	backupDriveIDNames idname.Cacher,
	dcs []data.RestoreCollection,
	deets *details.Builder,
	errs *fault.Bus,
	ctr *count.Bus,
) (*support.ControllerOperationStatus, error) {
	var (
-		restoreMetrics support.CollectionMetrics
-		caches         = NewRestoreCaches()
-		el             = errs.Local()
+		restoreMetrics      support.CollectionMetrics
+		el                  = errs.Local()
+		caches              = NewRestoreCaches(backupDriveIDNames)
+		protectedResourceID = dcs[0].FullPath().ResourceOwner()
+		fallbackDriveName   = restoreCfg.Location
	)

	ctx = clues.Add(ctx, "backup_version", backupVersion)

+	err := caches.Populate(ctx, rh, protectedResourceID)
+	if err != nil {
+		return nil, clues.Wrap(err, "initializing restore caches")
+	}
+
	// Reorder collections so that the parents directories are created
	// before the child directories; a requirement for permissions.
	data.SortRestoreCollections(dcs)

@@ -102,7 +183,7 @@ func ConsumeRestoreCollections(
	ictx = clues.Add(
		ctx,
		"category", dc.FullPath().Category(),
-		"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()),
+		"resource_owner", clues.Hide(protectedResourceID),
		"full_path", dc.FullPath())
)

@@ -115,6 +196,7 @@ func ConsumeRestoreCollections(
	caches,
	deets,
	opts.RestorePermissions,
+	fallbackDriveName,
	errs,
	ctr.Local())
if err != nil {

@@ -152,18 +234,20 @@ func RestoreCollection(
	caches *restoreCaches,
	deets *details.Builder,
	restorePerms bool, // TODO: move into restoreConfig
+	fallbackDriveName string,
	errs *fault.Bus,
	ctr *count.Bus,
) (support.CollectionMetrics, error) {
	var (
-		metrics        = support.CollectionMetrics{}
-		directory      = dc.FullPath()
-		el             = errs.Local()
-		metricsObjects int64
-		metricsBytes   int64
-		metricsSuccess int64
-		wg             sync.WaitGroup
-		complete       bool
+		metrics             = support.CollectionMetrics{}
+		directory           = dc.FullPath()
+		protectedResourceID = directory.ResourceOwner()
+		el                  = errs.Local()
+		metricsObjects      int64
+		metricsBytes        int64
+		metricsSuccess      int64
+		wg                  sync.WaitGroup
+		complete            bool
	)

	ctx, end := diagnostics.Span(ctx, "gc:drive:restoreCollection", diagnostics.Label("path", directory))
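The comment above notes that collections must be reordered so parent directories are created before their children (a requirement for permissions). One way to realize that invariant is sorting by path depth; a hedged sketch, not corso's actual SortRestoreCollections:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// sortParentsFirst orders paths so any directory precedes its
// descendants; sorting by segment count suffices because a parent
// always has strictly fewer segments than its children.
func sortParentsFirst(paths []string) {
	sort.SliceStable(paths, func(i, j int) bool {
		return strings.Count(paths[i], "/") < strings.Count(paths[j], "/")
	})
}

func main() {
	paths := []string{
		"drive/root:/a/b/c",
		"drive/root:/a",
		"drive/root:/a/b",
	}

	sortParentsFirst(paths)
	fmt.Println(paths) // a, a/b, a/b/c
}
```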
@@ -174,15 +258,23 @@ func RestoreCollection(
		return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx)
	}

-	if _, ok := caches.DriveIDToRootFolderID[drivePath.DriveID]; !ok {
-		root, err := rh.GetRootFolder(ctx, drivePath.DriveID)
-		if err != nil {
-			return metrics, clues.Wrap(err, "getting drive root id")
-		}
-
-		caches.DriveIDToRootFolderID[drivePath.DriveID] = ptr.Val(root.GetId())
+	di, err := ensureDriveExists(
+		ctx,
+		rh,
+		caches,
+		drivePath,
+		protectedResourceID,
+		fallbackDriveName)
+	if err != nil {
+		return metrics, clues.Wrap(err, "ensuring drive exists")
	}

+	// clobber the drivePath details with the details retrieved
+	// in the ensure func, as they might have changed to reflect
+	// a different drive as a restore location.
+	drivePath.DriveID = di.id
+	drivePath.Root = di.rootFolderID
+
	// Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy
	// from the backup under this the restore folder instead of root)
	// i.e. Restore into `<restoreContainerName>/<original folder path>`

@@ -704,7 +796,7 @@ func createRestoreFolders(
	driveID        = drivePath.DriveID
	folders        = restoreDir.Elements()
	location       = path.Builder{}.Append(driveID)
-	parentFolderID = caches.DriveIDToRootFolderID[drivePath.DriveID]
+	parentFolderID = caches.DriveIDToDriveInfo[drivePath.DriveID].rootFolderID
)

ctx = clues.Add(
@@ -1113,3 +1205,79 @@ func AugmentRestorePaths(

	return paths, nil
}

+type PostDriveAndGetRootFolderer interface {
+	PostDriver
+	GetRootFolderer
+}
+
+// ensureDriveExists looks up the drive by its id. If no drive is found with
+// that ID, a new drive is generated with the same name. If the name collides
+// with an existing drive, a number is appended to the drive name. Eg: foo ->
+// foo 1. This will repeat as many times as is needed.
+// Returns the root folder of the drive.
+func ensureDriveExists(
+	ctx context.Context,
+	pdagrf PostDriveAndGetRootFolderer,
+	caches *restoreCaches,
+	drivePath *path.DrivePath,
+	protectedResourceID, fallbackDriveName string,
+) (driveInfo, error) {
+	driveID := drivePath.DriveID
+
+	// the drive might already be cached by ID. it's okay
+	// if the name has changed. the ID is a better reference
+	// anyway.
+	if di, ok := caches.DriveIDToDriveInfo[driveID]; ok {
+		return di, nil
+	}
+
+	var (
+		newDriveName = fallbackDriveName
+		newDrive     models.Driveable
+		err          error
+	)
+
+	// if the drive wasn't found by ID, maybe we can find a
+	// drive with the same name but different ID.
+	// start by looking up the old drive's name
+	oldName, ok := caches.BackupDriveIDName.NameOf(driveID)
+	if ok {
+		// check for drives that currently have the same name
+		if di, ok := caches.DriveNameToDriveInfo[oldName]; ok {
+			return di, nil
+		}
+
+		// if no current drives have the same name, we'll make
+		// a new drive with that name.
+		newDriveName = oldName
+	}
+
+	nextDriveName := newDriveName
+
+	// For sharepoint, document libraries can collide by name with
+	// item types beyond just drive. Lists, for example, cannot share
+	// names with document libraries (they're the same type, actually).
+	// In those cases we need to rename the drive until we can create
+	// one without a collision.
+	for i := 1; ; i++ {
+		ictx := clues.Add(ctx, "new_drive_name", clues.Hide(nextDriveName))
+
+		newDrive, err = pdagrf.PostDrive(ictx, protectedResourceID, nextDriveName)
+		if err != nil && !errors.Is(err, graph.ErrItemAlreadyExistsConflict) {
+			return driveInfo{}, clues.Wrap(err, "creating new drive")
+		}
+
+		if err == nil {
+			break
+		}
+
+		nextDriveName = fmt.Sprintf("%s %d", newDriveName, i)
+	}
+
+	if err := caches.AddDrive(ctx, newDrive, pdagrf); err != nil {
+		return driveInfo{}, clues.Wrap(err, "adding drive to cache").OrNil()
+	}
+
+	return caches.DriveIDToDriveInfo[ptr.Val(newDrive.GetId())], nil
+}
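ensureDriveExists retries drive creation under `name 1`, `name 2`, ... until the name no longer collides. The core loop, isolated into a runnable sketch with an in-memory stand-in for PostDrive (the helper and error names here are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

var errAlreadyExists = errors.New("item already exists")

// createUnique appends " 1", " 2", ... to the base name until the
// create call stops reporting a name collision, mirroring the loop in
// ensureDriveExists; `taken` stands in for the Graph API's view of
// existing drives and lists.
func createUnique(taken map[string]bool, base string) (string, error) {
	post := func(name string) error {
		if taken[name] {
			return errAlreadyExists
		}

		taken[name] = true

		return nil
	}

	next := base

	for i := 1; ; i++ {
		err := post(next)
		if err == nil {
			return next, nil
		}

		// Any error other than a collision is fatal.
		if !errors.Is(err, errAlreadyExists) {
			return "", err
		}

		next = fmt.Sprintf("%s %d", base, i)
	}
}

func main() {
	taken := map[string]bool{"docs": true, "docs 1": true}
	name, _ := createUnique(taken, "docs")
	fmt.Println(name) // docs 2
}
```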
@@ -11,6 +11,7 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

+	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/m365/graph"
	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"

@@ -21,6 +22,7 @@ import (
	"github.com/alcionai/corso/src/pkg/count"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
+	apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
)

type RestoreUnitSuite struct {

@@ -491,7 +493,7 @@ func (suite *RestoreUnitSuite) TestRestoreItem_collisionHandling() {
	mndi.SetId(ptr.To(mndiID))

	var (
-		caches = NewRestoreCaches()
+		caches = NewRestoreCaches(nil)
		rh     = &mock.RestoreHandler{
			PostItemResp:  models.NewDriveItem(),
			DeleteItemErr: test.deleteErr,

@@ -617,3 +619,435 @@ func (suite *RestoreUnitSuite) TestCreateFolder() {
		})
	}
}
+type mockGRF struct {
+	err        error
+	rootFolder models.DriveItemable
+}
+
+func (m *mockGRF) GetRootFolder(
+	context.Context,
+	string,
+) (models.DriveItemable, error) {
+	return m.rootFolder, m.err
+}
+
+func (suite *RestoreUnitSuite) TestRestoreCaches_AddDrive() {
+	rfID := "this-is-id"
+	driveID := "another-id"
+	name := "name"
+
+	rf := models.NewDriveItem()
+	rf.SetId(&rfID)
+
+	md := models.NewDrive()
+	md.SetId(&driveID)
+	md.SetName(&name)
+
+	table := []struct {
+		name        string
+		mock        *mockGRF
+		expectErr   require.ErrorAssertionFunc
+		expectID    string
+		checkValues bool
+	}{
+		{
+			name:        "good",
+			mock:        &mockGRF{rootFolder: rf},
+			expectErr:   require.NoError,
+			expectID:    rfID,
+			checkValues: true,
+		},
+		{
+			name:      "err",
+			mock:      &mockGRF{err: assert.AnError},
+			expectErr: require.Error,
+			expectID:  "",
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			ctx, flush := tester.NewContext(t)
+			defer flush()
+
+			rc := NewRestoreCaches(nil)
+			err := rc.AddDrive(ctx, md, test.mock)
+			test.expectErr(t, err, clues.ToCore(err))
+
+			if test.checkValues {
+				idResult := rc.DriveIDToDriveInfo[driveID]
+				assert.Equal(t, driveID, idResult.id, "drive id")
+				assert.Equal(t, name, idResult.name, "drive name")
+				assert.Equal(t, test.expectID, idResult.rootFolderID, "root folder id")
+
+				nameResult := rc.DriveNameToDriveInfo[name]
+				assert.Equal(t, driveID, nameResult.id, "drive id")
+				assert.Equal(t, name, nameResult.name, "drive name")
+				assert.Equal(t, test.expectID, nameResult.rootFolderID, "root folder id")
+			}
+		})
+	}
+}
+type mockGDPARF struct {
+	err        error
+	rootFolder models.DriveItemable
+	pager      *apiMock.DrivePager
+}
+
+func (m *mockGDPARF) GetRootFolder(
+	context.Context,
+	string,
+) (models.DriveItemable, error) {
+	return m.rootFolder, m.err
+}
+
+func (m *mockGDPARF) NewDrivePager(
+	string,
+	[]string,
+) api.DrivePager {
+	return m.pager
+}
+
+func (suite *RestoreUnitSuite) TestRestoreCaches_Populate() {
+	rfID := "this-is-id"
+	driveID := "another-id"
+	name := "name"
+
+	rf := models.NewDriveItem()
+	rf.SetId(&rfID)
+
+	md := models.NewDrive()
+	md.SetId(&driveID)
+	md.SetName(&name)
+
+	table := []struct {
+		name        string
+		mock        *apiMock.DrivePager
+		expectErr   require.ErrorAssertionFunc
+		expectLen   int
+		checkValues bool
+	}{
+		{
+			name: "no results",
+			mock: &apiMock.DrivePager{
+				ToReturn: []apiMock.PagerResult{
+					{Drives: []models.Driveable{}},
+				},
+			},
+			expectErr: require.NoError,
+			expectLen: 0,
+		},
+		{
+			name: "one result",
+			mock: &apiMock.DrivePager{
+				ToReturn: []apiMock.PagerResult{
+					{Drives: []models.Driveable{md}},
+				},
+			},
+			expectErr:   require.NoError,
+			expectLen:   1,
+			checkValues: true,
+		},
+		{
+			name: "error",
+			mock: &apiMock.DrivePager{
+				ToReturn: []apiMock.PagerResult{
+					{Err: assert.AnError},
+				},
+			},
+			expectErr: require.Error,
+			expectLen: 0,
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			ctx, flush := tester.NewContext(t)
+			defer flush()
+
+			gdparf := &mockGDPARF{
+				rootFolder: rf,
+				pager:      test.mock,
+			}
+
+			rc := NewRestoreCaches(nil)
+			err := rc.Populate(ctx, gdparf, "shmoo")
+			test.expectErr(t, err, clues.ToCore(err))
+
+			assert.Len(t, rc.DriveIDToDriveInfo, test.expectLen)
+			assert.Len(t, rc.DriveNameToDriveInfo, test.expectLen)
+
+			if test.checkValues {
+				idResult := rc.DriveIDToDriveInfo[driveID]
+				assert.Equal(t, driveID, idResult.id, "drive id")
+				assert.Equal(t, name, idResult.name, "drive name")
+				assert.Equal(t, rfID, idResult.rootFolderID, "root folder id")
+
+				nameResult := rc.DriveNameToDriveInfo[name]
+				assert.Equal(t, driveID, nameResult.id, "drive id")
+				assert.Equal(t, name, nameResult.name, "drive name")
+				assert.Equal(t, rfID, nameResult.rootFolderID, "root folder id")
+			}
+		})
+	}
+}
type mockPDAGRF struct {
|
||||
i int
|
||||
postResp []models.Driveable
|
||||
postErr []error
|
||||
|
||||
grf mockGRF
|
||||
}
|
||||
|
||||
func (m *mockPDAGRF) PostDrive(
|
||||
ctx context.Context,
|
||||
protectedResourceID, driveName string,
|
||||
) (models.Driveable, error) {
|
||||
defer func() { m.i++ }()
|
||||
|
||||
md := m.postResp[m.i]
|
||||
if md != nil {
|
||||
md.SetName(&driveName)
|
||||
}
|
||||
|
||||
return md, m.postErr[m.i]
|
||||
}
|
||||
|
||||
func (m *mockPDAGRF) GetRootFolder(
|
||||
ctx context.Context,
|
||||
driveID string,
|
||||
) (models.DriveItemable, error) {
|
||||
return m.grf.rootFolder, m.grf.err
|
||||
}
|
||||
|
||||
func (suite *RestoreUnitSuite) TestEnsureDriveExists() {
|
||||
rfID := "this-is-id"
|
||||
driveID := "another-id"
|
||||
oldID := "old-id"
|
||||
name := "name"
|
||||
otherName := "other name"
|
||||
|
||||
rf := models.NewDriveItem()
|
||||
rf.SetId(&rfID)
|
||||
|
||||
grf := mockGRF{rootFolder: rf}
|
||||
|
||||
makeMD := func() models.Driveable {
|
||||
md := models.NewDrive()
|
||||
md.SetId(&driveID)
|
||||
md.SetName(&name)
|
||||
|
||||
return md
|
||||
}
|
||||
|
||||
dp := &path.DrivePath{
|
||||
DriveID: driveID,
|
||||
Root: "root:",
|
||||
Folders: path.Elements{},
|
||||
}
|
||||
|
||||
oldDP := &path.DrivePath{
|
||||
DriveID: oldID,
|
||||
Root: "root:",
|
||||
Folders: path.Elements{},
|
||||
}
|
||||
|
||||
populatedCache := func(id string) *restoreCaches {
|
||||
rc := NewRestoreCaches(nil)
|
||||
di := driveInfo{
|
||||
id: id,
|
||||
name: name,
|
||||
}
|
||||
rc.DriveIDToDriveInfo[id] = di
|
||||
rc.DriveNameToDriveInfo[name] = di
|
||||
|
||||
return rc
|
||||
}
|
||||
|
||||
oldDriveIDNames := idname.NewCache(nil)
|
||||
oldDriveIDNames.Add(oldID, name)
|
||||
|
||||
idSwitchedCache := func() *restoreCaches {
|
||||
rc := NewRestoreCaches(oldDriveIDNames)
|
||||
di := driveInfo{
|
||||
id: "diff",
|
||||
name: name,
|
||||
}
|
||||
rc.DriveIDToDriveInfo["diff"] = di
|
||||
rc.DriveNameToDriveInfo[name] = di
|
||||
|
||||
return rc
|
||||
}
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
dp *path.DrivePath
|
||||
mock *mockPDAGRF
|
||||
rc *restoreCaches
|
||||
expectErr require.ErrorAssertionFunc
|
||||
fallbackName string
|
||||
expectName string
|
||||
expectID string
|
||||
skipValueChecks bool
|
||||
}{
|
||||
{
|
||||
name: "drive already in cache",
|
||||
dp: dp,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{makeMD()},
|
||||
postErr: []error{nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: populatedCache(driveID),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: name,
|
||||
expectName: name,
|
||||
expectID: driveID,
|
||||
},
|
||||
{
|
||||
name: "drive with same name but different id exists",
|
||||
dp: oldDP,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{makeMD()},
|
||||
postErr: []error{nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: idSwitchedCache(),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: otherName,
|
||||
expectName: name,
|
||||
expectID: "diff",
|
||||
},
|
||||
{
|
||||
name: "drive created with old name",
|
||||
dp: oldDP,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{makeMD()},
|
||||
postErr: []error{nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: NewRestoreCaches(oldDriveIDNames),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: otherName,
|
||||
expectName: name,
|
||||
expectID: driveID,
|
||||
},
|
||||
{
|
||||
name: "drive created with fallback name",
|
||||
dp: dp,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{makeMD()},
|
||||
postErr: []error{nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: NewRestoreCaches(nil),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: otherName,
|
||||
expectName: otherName,
|
||||
expectID: driveID,
|
||||
},
|
||||
{
|
||||
name: "error creating drive",
|
||||
dp: dp,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{nil},
|
||||
postErr: []error{assert.AnError},
|
||||
grf: grf,
|
||||
},
|
||||
rc: NewRestoreCaches(nil),
|
||||
expectErr: require.Error,
|
||||
fallbackName: name,
|
||||
expectName: "",
|
||||
skipValueChecks: true,
|
||||
expectID: driveID,
|
||||
},
|
||||
{
|
||||
name: "drive name already exists",
|
||||
dp: dp,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{makeMD()},
|
||||
postErr: []error{nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: populatedCache("beaux"),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: name,
|
||||
expectName: name,
|
||||
expectID: driveID,
|
||||
},
|
||||
{
|
||||
name: "list with name already exists",
|
||||
dp: dp,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{nil, makeMD()},
|
||||
postErr: []error{graph.ErrItemAlreadyExistsConflict, nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: NewRestoreCaches(nil),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: name,
|
||||
expectName: name + " 1",
|
||||
expectID: driveID,
|
||||
},
|
||||
{
|
||||
name: "list with old name already exists",
|
||||
dp: oldDP,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{nil, makeMD()},
|
||||
postErr: []error{graph.ErrItemAlreadyExistsConflict, nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: NewRestoreCaches(oldDriveIDNames),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: name,
|
||||
expectName: name + " 1",
|
||||
expectID: driveID,
|
||||
},
|
||||
{
|
||||
name: "drive and list with name already exist",
|
||||
dp: dp,
|
||||
mock: &mockPDAGRF{
|
||||
postResp: []models.Driveable{nil, makeMD()},
|
||||
postErr: []error{graph.ErrItemAlreadyExistsConflict, nil},
|
||||
grf: grf,
|
||||
},
|
||||
rc: populatedCache(driveID),
|
||||
expectErr: require.NoError,
|
||||
fallbackName: name,
|
||||
expectName: name,
|
||||
expectID: driveID,
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
rc := test.rc
|
||||
|
||||
di, err := ensureDriveExists(
|
||||
ctx,
|
||||
test.mock,
|
||||
rc,
|
||||
test.dp,
|
||||
"prID",
|
||||
test.fallbackName)
|
||||
test.expectErr(t, err, clues.ToCore(err))
|
||||
|
||||
if !test.skipValueChecks {
|
||||
assert.Equal(t, test.expectName, di.name, "ensured drive has expected name")
|
||||
assert.Equal(t, test.expectID, di.id, "ensured drive has expected id")
|
||||
|
||||
nameResult := rc.DriveNameToDriveInfo[test.expectName]
|
||||
assert.Equal(t, test.expectName, nameResult.name, "found drive entry with expected name")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
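The table-driven test above pins down the drive-resolution order used by ensureDriveExists during restore: reuse the backed-up drive when its ID is still known, otherwise fall back to the name the drive carried at backup time, and only then create a new drive, de-conflicting the name with a " 1" suffix when a list by that name already exists. What follows is only a rough, self-contained sketch of that order; every type and helper in it is a stand-in invented for illustration, not the package's actual implementation.

package sketch

import (
	"context"
	"errors"
	"fmt"
)

// errNameConflict stands in for graph.ErrItemAlreadyExistsConflict.
var errNameConflict = errors.New("item already exists")

type driveInfo struct{ id, name string }

// caches is a trimmed stand-in for restoreCaches.
type caches struct {
	byID      map[string]driveInfo // drives that exist right now, by id
	byName    map[string]driveInfo // drives that exist right now, by name
	oldIDName map[string]string    // backup-time drive id -> drive name
}

// postDrive stands in for the Lists().PostDrive call.
type postDrive func(ctx context.Context, siteID, name string) (driveInfo, error)

func resolveDrive(
	ctx context.Context,
	post postDrive,
	rc caches,
	backedUpDriveID, siteID, fallbackName string,
) (driveInfo, error) {
	// 1: the backed-up drive still exists; restore directly into it.
	if di, ok := rc.byID[backedUpDriveID]; ok {
		return di, nil
	}

	// 2: the drive is gone; prefer the name it had at backup time,
	// falling back to the caller-provided name.
	name := fallbackName
	if old, ok := rc.oldIDName[backedUpDriveID]; ok {
		name = old
	}

	if di, ok := rc.byName[name]; ok {
		return di, nil
	}

	// 3: nothing matches; create a new drive, retrying with a " 1"
	// suffix when a list by that name already exists.
	di, err := post(ctx, siteID, name)
	if errors.Is(err, errNameConflict) {
		di, err = post(ctx, siteID, name+" 1")
	}

	if err != nil {
		return driveInfo{}, fmt.Errorf("creating restore drive: %w", err)
	}

	return di, nil
}

The table's post-conditions also check that the resolved drive lands back in the name cache; the sketch omits that bookkeeping.
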
@ -38,6 +38,10 @@ func (ctrl *Controller) ConsumeRestoreCollections(
|
||||
ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
|
||||
ctx = clues.Add(ctx, "restore_config", restoreCfg) // TODO(rkeepers): needs PII control
|
||||
|
||||
if len(dcs) == 0 {
|
||||
return nil, clues.New("no data collections to restore")
|
||||
}
|
||||
|
||||
var (
|
||||
status *support.ControllerOperationStatus
|
||||
deets = &details.Builder{}
|
||||
@ -54,6 +58,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
|
||||
backupVersion,
|
||||
restoreCfg,
|
||||
opts,
|
||||
ctrl.backupDriveIDNames,
|
||||
dcs,
|
||||
deets,
|
||||
errs,
|
||||
@ -65,6 +70,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
|
||||
ctrl.AC,
|
||||
restoreCfg,
|
||||
opts,
|
||||
ctrl.backupDriveIDNames,
|
||||
dcs,
|
||||
deets,
|
||||
errs,
|
||||
|
||||
@ -157,11 +157,25 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
|
||||
var _ onedrive.RestoreHandler = &libraryRestoreHandler{}
|
||||
|
||||
type libraryRestoreHandler struct {
|
||||
ac api.Drives
|
||||
ac api.Client
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) PostDrive(
|
||||
ctx context.Context,
|
||||
siteID, driveName string,
|
||||
) (models.Driveable, error) {
|
||||
return h.ac.Lists().PostDrive(ctx, siteID, driveName)
|
||||
}
|
||||
|
||||
func NewRestoreHandler(ac api.Client) *libraryRestoreHandler {
|
||||
return &libraryRestoreHandler{ac.Drives()}
|
||||
return &libraryRestoreHandler{ac}
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) NewDrivePager(
|
||||
resourceOwner string,
|
||||
fields []string,
|
||||
) api.DrivePager {
|
||||
return h.ac.Drives().NewSiteDrivePager(resourceOwner, fields)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) AugmentItemInfo(
|
||||
@ -177,21 +191,21 @@ func (h libraryRestoreHandler) DeleteItem(
|
||||
ctx context.Context,
|
||||
driveID, itemID string,
|
||||
) error {
|
||||
return h.ac.DeleteItem(ctx, driveID, itemID)
|
||||
return h.ac.Drives().DeleteItem(ctx, driveID, itemID)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) DeleteItemPermission(
|
||||
ctx context.Context,
|
||||
driveID, itemID, permissionID string,
|
||||
) error {
|
||||
return h.ac.DeleteItemPermission(ctx, driveID, itemID, permissionID)
|
||||
return h.ac.Drives().DeleteItemPermission(ctx, driveID, itemID, permissionID)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) GetItemsInContainerByCollisionKey(
|
||||
ctx context.Context,
|
||||
driveID, containerID string,
|
||||
) (map[string]api.DriveItemIDType, error) {
|
||||
m, err := h.ac.GetItemsInContainerByCollisionKey(ctx, driveID, containerID)
|
||||
m, err := h.ac.Drives().GetItemsInContainerByCollisionKey(ctx, driveID, containerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -203,7 +217,7 @@ func (h libraryRestoreHandler) NewItemContentUpload(
|
||||
ctx context.Context,
|
||||
driveID, itemID string,
|
||||
) (models.UploadSessionable, error) {
|
||||
return h.ac.NewItemContentUpload(ctx, driveID, itemID)
|
||||
return h.ac.Drives().NewItemContentUpload(ctx, driveID, itemID)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) PostItemPermissionUpdate(
|
||||
@ -211,7 +225,7 @@ func (h libraryRestoreHandler) PostItemPermissionUpdate(
|
||||
driveID, itemID string,
|
||||
body *drives.ItemItemsItemInvitePostRequestBody,
|
||||
) (drives.ItemItemsItemInviteResponseable, error) {
|
||||
return h.ac.PostItemPermissionUpdate(ctx, driveID, itemID, body)
|
||||
return h.ac.Drives().PostItemPermissionUpdate(ctx, driveID, itemID, body)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) PostItemLinkShareUpdate(
|
||||
@ -219,7 +233,7 @@ func (h libraryRestoreHandler) PostItemLinkShareUpdate(
|
||||
driveID, itemID string,
|
||||
body *drives.ItemItemsItemCreateLinkPostRequestBody,
|
||||
) (models.Permissionable, error) {
|
||||
return h.ac.PostItemLinkShareUpdate(ctx, driveID, itemID, body)
|
||||
return h.ac.Drives().PostItemLinkShareUpdate(ctx, driveID, itemID, body)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) PostItemInContainer(
|
||||
@ -228,21 +242,21 @@ func (h libraryRestoreHandler) PostItemInContainer(
|
||||
newItem models.DriveItemable,
|
||||
onCollision control.CollisionPolicy,
|
||||
) (models.DriveItemable, error) {
|
||||
return h.ac.PostItemInContainer(ctx, driveID, parentFolderID, newItem, onCollision)
|
||||
return h.ac.Drives().PostItemInContainer(ctx, driveID, parentFolderID, newItem, onCollision)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) GetFolderByName(
|
||||
ctx context.Context,
|
||||
driveID, parentFolderID, folderName string,
|
||||
) (models.DriveItemable, error) {
|
||||
return h.ac.GetFolderByName(ctx, driveID, parentFolderID, folderName)
|
||||
return h.ac.Drives().GetFolderByName(ctx, driveID, parentFolderID, folderName)
|
||||
}
|
||||
|
||||
func (h libraryRestoreHandler) GetRootFolder(
|
||||
ctx context.Context,
|
||||
driveID string,
|
||||
) (models.DriveItemable, error) {
|
||||
return h.ac.GetRootFolder(ctx, driveID)
|
||||
return h.ac.Drives().GetRootFolder(ctx, driveID)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@ -10,6 +10,8 @@ import (
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||
"github.com/alcionai/corso/src/internal/common/idname"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/diagnostics"
|
||||
@ -33,17 +35,25 @@ func ConsumeRestoreCollections(
|
||||
ac api.Client,
|
||||
restoreCfg control.RestoreConfig,
|
||||
opts control.Options,
|
||||
backupDriveIDNames idname.Cacher,
|
||||
dcs []data.RestoreCollection,
|
||||
deets *details.Builder,
|
||||
errs *fault.Bus,
|
||||
ctr *count.Bus,
|
||||
) (*support.ControllerOperationStatus, error) {
|
||||
var (
|
||||
restoreMetrics support.CollectionMetrics
|
||||
caches = onedrive.NewRestoreCaches()
|
||||
el = errs.Local()
|
||||
lrh = libraryRestoreHandler{ac}
|
||||
protectedResourceID = dcs[0].FullPath().ResourceOwner()
|
||||
restoreMetrics support.CollectionMetrics
|
||||
caches = onedrive.NewRestoreCaches(backupDriveIDNames)
|
||||
el = errs.Local()
|
||||
)
|
||||
|
||||
err := caches.Populate(ctx, lrh, protectedResourceID)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "initializing restore caches")
|
||||
}
|
||||
|
||||
// Reorder collections so that the parents directories are created
|
||||
// before the child directories; a requirement for permissions.
|
||||
data.SortRestoreCollections(dcs)
|
||||
@ -69,13 +79,14 @@ func ConsumeRestoreCollections(
|
||||
case path.LibrariesCategory:
|
||||
metrics, err = onedrive.RestoreCollection(
|
||||
ictx,
|
||||
libraryRestoreHandler{ac.Drives()},
|
||||
lrh,
|
||||
restoreCfg,
|
||||
backupVersion,
|
||||
dc,
|
||||
caches,
|
||||
deets,
|
||||
opts.RestorePermissions,
|
||||
control.DefaultRestoreContainerName(dttm.HumanReadableDriveItem),
|
||||
errs,
|
||||
ctr)
|
||||
|
||||
|
||||
@ -46,6 +46,17 @@ type (
|
||||
) (*details.Details, error)
|
||||
|
||||
Wait() *data.CollectionStats
|
||||
|
||||
CacheItemInfoer
|
||||
}
|
||||
|
||||
CacheItemInfoer interface {
|
||||
// CacheItemInfo is used by the consumer to cache metadata that is
|
||||
// sourced from per-item info, but may be valuable to the restore at
|
||||
// large.
|
||||
// Ex: pairing drive ids with drive names as they appeared at the time
|
||||
// of backup.
|
||||
CacheItemInfo(v details.ItemInfo)
|
||||
}
|
||||
|
||||
RepoMaintenancer interface {
|
||||
|
||||
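The CacheItemInfoer hook added above is how the restore operation hands per-item backup metadata to the controller before any collections are consumed; the loop added to formatDetailsForRestoration below is the producer side, feeding each reduced detail entry through CacheItemInfo. Here is a minimal sketch of a consumer implementation; the itemInfo struct is a trimmed stand-in for details.ItemInfo, not its real shape.

package sketch

import "sync"

// itemInfo is a trimmed stand-in for details.ItemInfo, reduced to the two
// fields this sketch cares about.
type itemInfo struct {
	driveID   string
	driveName string
}

// driveNameCacher records the drive id -> name pairs observed at backup
// time, so a later restore can locate (or recreate) a drive by its old name
// even when its id no longer exists.
type driveNameCacher struct {
	mu    sync.Mutex
	names map[string]string
}

// CacheItemInfo satisfies the CacheItemInfoer hook for this sketch.
func (c *driveNameCacher) CacheItemInfo(v itemInfo) {
	if len(v.driveID) == 0 {
		return
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.names == nil {
		c.names = map[string]string{}
	}

	c.names[v.driveID] = v.driveName
}
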
@ -219,7 +219,13 @@ func (op *RestoreOperation) do(
|
||||
|
||||
observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(bup.Selector.DiscreteOwner))
|
||||
|
||||
paths, err := formatDetailsForRestoration(ctx, bup.Version, op.Selectors, deets, op.Errors)
|
||||
paths, err := formatDetailsForRestoration(
|
||||
ctx,
|
||||
bup.Version,
|
||||
op.Selectors,
|
||||
deets,
|
||||
op.rc,
|
||||
op.Errors)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "formatting paths from details")
|
||||
}
|
||||
@ -359,6 +365,7 @@ func formatDetailsForRestoration(
|
||||
backupVersion int,
|
||||
sel selectors.Selector,
|
||||
deets *details.Details,
|
||||
cii inject.CacheItemInfoer,
|
||||
errs *fault.Bus,
|
||||
) ([]path.RestorePaths, error) {
|
||||
fds, err := sel.Reduce(ctx, deets, errs)
|
||||
@ -366,6 +373,11 @@ func formatDetailsForRestoration(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// allow restore controllers to iterate over item metadata
|
||||
for _, ent := range fds.Entries {
|
||||
cii.CacheItemInfo(ent.ItemInfo)
|
||||
}
|
||||
|
||||
paths, err := pathtransformer.GetPaths(ctx, backupVersion, fds.Items(), errs)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "getting restore paths")
|
||||
|
||||
@ -10,8 +10,6 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/events"
|
||||
evmock "github.com/alcionai/corso/src/internal/events/mock"
|
||||
@ -21,7 +19,6 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/resource"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||
"github.com/alcionai/corso/src/internal/stats"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
@ -32,7 +29,6 @@ import (
|
||||
"github.com/alcionai/corso/src/pkg/control/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/count"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/store"
|
||||
)
|
||||
@ -143,13 +139,6 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
|
||||
// integration
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type bupResults struct {
|
||||
selectorResourceOwners []string
|
||||
backupID model.StableID
|
||||
items int
|
||||
ctrl *m365.Controller
|
||||
}
|
||||
|
||||
type RestoreOpIntegrationSuite struct {
|
||||
tester.Suite
|
||||
|
||||
@ -267,192 +256,6 @@ func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() {
|
||||
}
|
||||
}
|
||||
|
||||
func setupExchangeBackup(
|
||||
t *testing.T,
|
||||
kw *kopia.Wrapper,
|
||||
sw *store.Wrapper,
|
||||
acct account.Account,
|
||||
owner string,
|
||||
) bupResults {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
users = []string{owner}
|
||||
esel = selectors.NewExchangeBackup(users)
|
||||
)
|
||||
|
||||
esel.DiscreteOwner = owner
|
||||
esel.Include(
|
||||
esel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()),
|
||||
esel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()),
|
||||
esel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()))
|
||||
|
||||
ctrl, sel := ControllerWithSelector(t, ctx, acct, resource.Users, esel.Selector, nil, nil)
|
||||
|
||||
bo, err := NewBackupOperation(
|
||||
ctx,
|
||||
control.Defaults(),
|
||||
kw,
|
||||
sw,
|
||||
ctrl,
|
||||
acct,
|
||||
sel,
|
||||
inMock.NewProvider(owner, owner),
|
||||
evmock.NewBus())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = bo.Run(ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotEmpty(t, bo.Results.BackupID)
|
||||
|
||||
return bupResults{
|
||||
selectorResourceOwners: users,
|
||||
backupID: bo.Results.BackupID,
|
||||
// Discount metadata collection files (1 delta and one prev path for each category).
|
||||
// These meta files are used to aid restore, but are not themselves
|
||||
// restored (ie: counted as writes).
|
||||
items: bo.Results.ItemsWritten - 6,
|
||||
ctrl: ctrl,
|
||||
}
|
||||
}
|
||||
|
||||
func setupSharePointBackup(
|
||||
t *testing.T,
|
||||
kw *kopia.Wrapper,
|
||||
sw *store.Wrapper,
|
||||
acct account.Account,
|
||||
owner string,
|
||||
) bupResults {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
sites = []string{owner}
|
||||
ssel = selectors.NewSharePointBackup(sites)
|
||||
)
|
||||
|
||||
// assume a folder name "test" exists in the drive.
|
||||
// this is brittle, and requires us to backfill anytime
|
||||
// the site under test changes, but also prevents explosive
|
||||
// growth from re-backup/restore of restored files.
|
||||
ssel.Include(ssel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
|
||||
ssel.DiscreteOwner = owner
|
||||
|
||||
ctrl, sel := ControllerWithSelector(t, ctx, acct, resource.Sites, ssel.Selector, nil, nil)
|
||||
|
||||
bo, err := NewBackupOperation(
|
||||
ctx,
|
||||
control.Defaults(),
|
||||
kw,
|
||||
sw,
|
||||
ctrl,
|
||||
acct,
|
||||
sel,
|
||||
inMock.NewProvider(owner, owner),
|
||||
evmock.NewBus())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = bo.Run(ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotEmpty(t, bo.Results.BackupID)
|
||||
|
||||
return bupResults{
|
||||
selectorResourceOwners: sites,
|
||||
backupID: bo.Results.BackupID,
|
||||
// Discount metadata files (2: 1 delta, 1 prev path)
|
||||
// assume only one folder, and therefore 1 dirmeta per drive
|
||||
// (2 drives: documents and more documents)
|
||||
// assume only one file in each folder, and therefore 1 meta per drive
|
||||
// (2 drives: documents and more documents)
|
||||
// Meta files are used to aid restore, but are not themselves
|
||||
// restored (ie: counted as writes).
|
||||
items: bo.Results.ItemsWritten - 6,
|
||||
ctrl: ctrl,
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
|
||||
tables := []struct {
|
||||
name string
|
||||
owner string
|
||||
restoreCfg control.RestoreConfig
|
||||
getSelector func(t *testing.T, owners []string) selectors.Selector
|
||||
setup func(t *testing.T, kw *kopia.Wrapper, sw *store.Wrapper, acct account.Account, owner string) bupResults
|
||||
}{
|
||||
{
|
||||
name: "Exchange_Restore",
|
||||
owner: tconfig.M365UserID(suite.T()),
|
||||
restoreCfg: testdata.DefaultRestoreConfig(""),
|
||||
getSelector: func(t *testing.T, owners []string) selectors.Selector {
|
||||
rsel := selectors.NewExchangeRestore(owners)
|
||||
rsel.Include(rsel.AllData())
|
||||
|
||||
return rsel.Selector
|
||||
},
|
||||
setup: setupExchangeBackup,
|
||||
},
|
||||
{
|
||||
name: "SharePoint_Restore",
|
||||
owner: tconfig.M365SiteID(suite.T()),
|
||||
restoreCfg: control.DefaultRestoreConfig(dttm.SafeForTesting),
|
||||
getSelector: func(t *testing.T, owners []string) selectors.Selector {
|
||||
rsel := selectors.NewSharePointRestore(owners)
|
||||
rsel.Include(rsel.Library(tconfig.LibraryDocuments), rsel.Library(tconfig.LibraryMoreDocuments))
|
||||
|
||||
return rsel.Selector
|
||||
},
|
||||
setup: setupSharePointBackup,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tables {
|
||||
suite.Run(test.name, func() {
|
||||
var (
|
||||
t = suite.T()
|
||||
mb = evmock.NewBus()
|
||||
bup = test.setup(t, suite.kw, suite.sw, suite.acct, test.owner)
|
||||
)
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
require.NotZero(t, bup.items)
|
||||
require.NotEmpty(t, bup.backupID)
|
||||
|
||||
ro, err := NewRestoreOperation(
|
||||
ctx,
|
||||
control.Options{FailureHandling: control.FailFast},
|
||||
suite.kw,
|
||||
suite.sw,
|
||||
bup.ctrl,
|
||||
tconfig.NewM365Account(t),
|
||||
bup.backupID,
|
||||
test.getSelector(t, bup.selectorResourceOwners),
|
||||
test.restoreCfg,
|
||||
mb,
|
||||
count.New())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
ds, err := ro.Run(ctx)
|
||||
|
||||
require.NoError(t, err, "restoreOp.Run() %+v", clues.ToCore(err))
|
||||
require.NotEmpty(t, ro.Results, "restoreOp results")
|
||||
require.NotNil(t, ds, "restored details")
|
||||
assert.Equal(t, ro.Status, Completed, "restoreOp status")
|
||||
assert.Equal(t, ro.Results.ItemsWritten, len(ds.Items()), "item write count matches len details")
|
||||
assert.Less(t, 0, ro.Results.ItemsRead, "restore items read")
|
||||
assert.Less(t, int64(0), ro.Results.BytesRead, "bytes read")
|
||||
assert.Equal(t, 1, ro.Results.ResourceOwners, "resource Owners")
|
||||
assert.NoError(t, ro.Errors.Failure(), "non-recoverable error", clues.ToCore(ro.Errors.Failure()))
|
||||
assert.Empty(t, ro.Errors.Recovered(), "recoverable errors")
|
||||
assert.Equal(t, bup.items, ro.Results.ItemsWritten, "backup and restore wrote the same num of items")
|
||||
assert.Equal(t, 1, mb.TimesCalled[events.RestoreStart], "restore-start events")
|
||||
assert.Equal(t, 1, mb.TimesCalled[events.RestoreEnd], "restore-end events")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoBackup() {
|
||||
t := suite.T()
|
||||
|
||||
|
||||
@ -5,6 +5,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/google/uuid"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
@ -19,6 +22,8 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/version"
|
||||
deeTD "github.com/alcionai/corso/src/pkg/backup/details/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/count"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
|
||||
@ -196,7 +201,7 @@ func (suite *SharePointRestoreIntgSuite) SetupSuite() {
|
||||
}
|
||||
|
||||
func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointWithAdvancedOptions() {
|
||||
sel := selectors.NewSharePointBackup([]string{suite.its.userID})
|
||||
sel := selectors.NewSharePointBackup([]string{suite.its.siteID})
|
||||
sel.Include(selTD.SharePointBackupFolderScope(sel))
|
||||
sel.Filter(sel.Library("documents"))
|
||||
sel.DiscreteOwner = suite.its.siteID
|
||||
@ -209,3 +214,240 @@ func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointWithAdvancedO
|
||||
suite.its.siteDriveID,
|
||||
suite.its.siteDriveRootFolderID)
|
||||
}
|
||||
|
||||
func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointDeletedDrives() {
|
||||
t := suite.T()
|
||||
|
||||
// despite the client having methods for drive.Patch and drive.Delete, both only return
// the error code and message `invalidRequest`.
|
||||
t.Skip("graph api doesn't allow patch or delete on drives, so we cannot run any conditions")
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
rc := ctrlTD.DefaultRestoreConfig("restore_deleted_drives")
|
||||
rc.OnCollision = control.Copy
|
||||
|
||||
// create a new drive
|
||||
md, err := suite.its.ac.Lists().PostDrive(ctx, suite.its.siteID, rc.Location)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
driveID := ptr.Val(md.GetId())
|
||||
|
||||
// get the root folder
|
||||
mdi, err := suite.its.ac.Drives().GetRootFolder(ctx, driveID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
rootFolderID := ptr.Val(mdi.GetId())
|
||||
|
||||
// add an item to it
|
||||
itemName := uuid.NewString()
|
||||
|
||||
item := models.NewDriveItem()
|
||||
item.SetName(ptr.To(itemName + ".txt"))
|
||||
|
||||
file := models.NewFile()
|
||||
item.SetFile(file)
|
||||
|
||||
_, err = suite.its.ac.Drives().PostItemInContainer(
|
||||
ctx,
|
||||
driveID,
|
||||
rootFolderID,
|
||||
item,
|
||||
control.Copy)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// run a backup
|
||||
var (
|
||||
mb = evmock.NewBus()
|
||||
opts = control.Defaults()
|
||||
graphClient = suite.its.ac.Stable.Client()
|
||||
)
|
||||
|
||||
bsel := selectors.NewSharePointBackup([]string{suite.its.siteID})
|
||||
bsel.Include(selTD.SharePointBackupFolderScope(bsel))
|
||||
bsel.Filter(bsel.Library(rc.Location))
|
||||
bsel.DiscreteOwner = suite.its.siteID
|
||||
|
||||
bo, bod := prepNewTestBackupOp(t, ctx, mb, bsel.Selector, opts, version.Backup)
|
||||
defer bod.close(t, ctx)
|
||||
|
||||
runAndCheckBackup(t, ctx, &bo, mb, false)
|
||||
|
||||
// test cases:
|
||||
|
||||
// first test, we take the current drive and rename it.
|
||||
// the restore should find the drive by id and restore items
|
||||
// into it like normal. Due to collision handling, this should
|
||||
// create a copy of the current item.
|
||||
suite.Run("renamed drive", func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
patchBody := models.NewDrive()
|
||||
patchBody.SetName(ptr.To("some other name"))
|
||||
|
||||
md, err = graphClient.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Patch(ctx, patchBody, nil)
|
||||
require.NoError(t, err, clues.ToCore(graph.Stack(ctx, err)))
|
||||
|
||||
var (
|
||||
mb = evmock.NewBus()
|
||||
ctr = count.New()
|
||||
)
|
||||
|
||||
ro, _ := prepNewTestRestoreOp(
|
||||
t,
|
||||
ctx,
|
||||
bod.st,
|
||||
bo.Results.BackupID,
|
||||
mb,
|
||||
ctr,
|
||||
bod.sel,
|
||||
opts,
|
||||
rc)
|
||||
|
||||
runAndCheckRestore(t, ctx, &ro, mb, false)
|
||||
assert.Equal(t, 1, ctr.Get(count.NewItemCreated), "restored an item")
|
||||
|
||||
resp, err := graphClient.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId(rootFolderID).
|
||||
Children().
|
||||
Get(ctx, nil)
|
||||
require.NoError(t, err, clues.ToCore(graph.Stack(ctx, err)))
|
||||
|
||||
items := resp.GetValue()
|
||||
assert.Len(t, items, 2)
|
||||
|
||||
for _, item := range items {
|
||||
assert.Contains(t, ptr.Val(item.GetName()), itemName)
|
||||
}
|
||||
})
|
||||
|
||||
// second test, we delete the drive altogether. the restore should find
|
||||
// no existing drives, but it should have the old drive's name and attempt
|
||||
// to recreate that drive by name.
|
||||
suite.Run("deleted drive", func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
err = graphClient.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Delete(ctx, nil)
|
||||
require.NoError(t, err, clues.ToCore(graph.Stack(ctx, err)))
|
||||
|
||||
var (
|
||||
mb = evmock.NewBus()
|
||||
ctr = count.New()
|
||||
)
|
||||
|
||||
ro, _ := prepNewTestRestoreOp(
|
||||
t,
|
||||
ctx,
|
||||
bod.st,
|
||||
bo.Results.BackupID,
|
||||
mb,
|
||||
ctr,
|
||||
bod.sel,
|
||||
opts,
|
||||
rc)
|
||||
|
||||
runAndCheckRestore(t, ctx, &ro, mb, false)
|
||||
assert.Equal(t, 1, ctr.Get(count.NewItemCreated), "restored an item")
|
||||
|
||||
pgr := suite.its.ac.
|
||||
Drives().
|
||||
NewSiteDrivePager(suite.its.siteID, []string{"id", "name"})
|
||||
|
||||
drives, err := api.GetAllDrives(ctx, pgr, false, -1)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
var created models.Driveable
|
||||
|
||||
for _, drive := range drives {
|
||||
if ptr.Val(drive.GetName()) == rc.Location &&
|
||||
ptr.Val(drive.GetId()) != driveID {
|
||||
created = drive
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, created, "found the restored drive by name")
|
||||
md = created
|
||||
driveID = ptr.Val(md.GetId())
|
||||
|
||||
mdi, err := suite.its.ac.Drives().GetRootFolder(ctx, driveID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
rootFolderID = ptr.Val(mdi.GetId())
|
||||
|
||||
resp, err := graphClient.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId(rootFolderID).
|
||||
Children().
|
||||
Get(ctx, nil)
|
||||
require.NoError(t, err, clues.ToCore(graph.Stack(ctx, err)))
|
||||
|
||||
items := resp.GetValue()
|
||||
assert.Len(t, items, 1)
|
||||
|
||||
assert.Equal(t, ptr.Val(items[0].GetName()), itemName+".txt")
|
||||
})
|
||||
|
||||
// final test, run a follow-up restore. This should match the
|
||||
// drive we created in the prior test by name, but not by ID.
|
||||
suite.Run("different drive - same name", func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
mb = evmock.NewBus()
|
||||
ctr = count.New()
|
||||
)
|
||||
|
||||
ro, _ := prepNewTestRestoreOp(
|
||||
t,
|
||||
ctx,
|
||||
bod.st,
|
||||
bo.Results.BackupID,
|
||||
mb,
|
||||
ctr,
|
||||
bod.sel,
|
||||
opts,
|
||||
rc)
|
||||
|
||||
runAndCheckRestore(t, ctx, &ro, mb, false)
|
||||
|
||||
assert.Equal(t, 1, ctr.Get(count.NewItemCreated), "restored an item")
|
||||
|
||||
resp, err := graphClient.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId(rootFolderID).
|
||||
Children().
|
||||
Get(ctx, nil)
|
||||
require.NoError(t, err, clues.ToCore(graph.Stack(ctx, err)))
|
||||
|
||||
items := resp.GetValue()
|
||||
assert.Len(t, items, 2)
|
||||
|
||||
for _, item := range items {
|
||||
assert.Contains(t, ptr.Val(item.GetName()), itemName)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@ -52,8 +52,9 @@ type RestoreConfig struct {
|
||||
// Defaults to "Corso_Restore_<current_dttm>"
|
||||
Location string
|
||||
|
||||
// Drive specifies the drive into which the data will be restored.
|
||||
// If empty, data is restored to the same drive that was backed up.
|
||||
// Drive specifies the name of the drive into which the data will be
|
||||
// restored. If empty, data is restored to the same drive that was backed
|
||||
// up.
|
||||
// Defaults to empty.
|
||||
Drive string
|
||||
}
|
||||
@ -65,6 +66,10 @@ func DefaultRestoreConfig(timeFormat dttm.TimeFormat) RestoreConfig {
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultRestoreContainerName(timeFormat dttm.TimeFormat) string {
|
||||
return defaultRestoreLocation + dttm.FormatNow(timeFormat)
|
||||
}
|
||||
|
||||
// EnsureRestoreConfigDefaults sets all non-supported values in the config
|
||||
// struct to the default value.
|
||||
func EnsureRestoreConfigDefaults(
|
||||
|
||||
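Since the comment above now spells out that Drive is matched by name rather than by id, a caller that wants a SharePoint restore to land in a specific document library only needs to set that name. A short, hedged example follows; the library name is hypothetical, and the import paths are the ones used elsewhere in this change.

package sketch

import (
	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/pkg/control"
)

// restoreIntoNamedLibrary builds a restore config aimed at a document
// library by name. "Restored Documents" is a hypothetical library name.
func restoreIntoNamedLibrary() control.RestoreConfig {
	rc := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)

	// Leaving Drive empty restores into the drive the data was backed up
	// from; setting it targets (or recreates) a drive with this name.
	rc.Drive = "Restored Documents"

	return rc
}
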
64
src/pkg/services/m365/api/lists.go
Normal file
@ -0,0 +1,64 @@
|
||||
package api

import (
	"context"

	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/m365/graph"
)

// ---------------------------------------------------------------------------
// controller
// ---------------------------------------------------------------------------

func (c Client) Lists() Lists {
	return Lists{c}
}

// Lists is an interface-compliant provider of the client.
type Lists struct {
	Client
}

// PostDrive creates a new list of type drive. Specifically used to create
// documentLibraries for SharePoint Sites.
func (c Lists) PostDrive(
	ctx context.Context,
	siteID, driveName string,
) (models.Driveable, error) {
	list := models.NewList()
	list.SetDisplayName(&driveName)
	list.SetDescription(ptr.To("corso auto-generated restore destination"))

	li := models.NewListInfo()
	li.SetTemplate(ptr.To("documentLibrary"))
	list.SetList(li)

	// creating a list of type documentLibrary will result in the creation
	// of a new drive owned by the given site.
	builder := c.Stable.
		Client().
		Sites().
		BySiteId(siteID).
		Lists()

	newList, err := builder.Post(ctx, list, nil)
	if graph.IsErrItemAlreadyExistsConflict(err) {
		return nil, clues.Stack(graph.ErrItemAlreadyExistsConflict, err).WithClues(ctx)
	}

	if err != nil {
		return nil, graph.Wrap(ctx, err, "creating documentLibrary list")
	}

	// drive information is not returned by the list creation.
	drive, err := builder.
		ByListId(ptr.Val(newList.GetId())).
		Drive().
		Get(ctx, nil)

	return drive, graph.Wrap(ctx, err, "fetching created documentLibrary").OrNil()
}
|
||||
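The PostDrive call above is what lets a restore bring back a document library that no longer exists: posting a documentLibrary-templated list provisions a fresh drive for the site, and items can then be written under its root folder. Below is a condensed, hedged sketch of that flow using only the api calls that appear in this change; error wrapping is trimmed and the parameter values are illustrative.

package sketch

import (
	"context"

	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// recreateLibraryAndWrite recreates a library by name, then writes a single
// file into its root.
func recreateLibraryAndWrite(
	ctx context.Context,
	ac api.Client,
	siteID, libraryName, fileName string,
) (models.DriveItemable, error) {
	// reposting the library provisions a new drive owned by the site.
	md, err := ac.Lists().PostDrive(ctx, siteID, libraryName)
	if err != nil {
		return nil, err
	}

	driveID := ptr.Val(md.GetId())

	// the new drive's root folder is the restore target.
	root, err := ac.Drives().GetRootFolder(ctx, driveID)
	if err != nil {
		return nil, err
	}

	item := models.NewDriveItem()
	item.SetName(ptr.To(fileName))
	item.SetFile(models.NewFile())

	return ac.Drives().PostItemInContainer(
		ctx,
		driveID,
		ptr.Val(root.GetId()),
		item,
		control.Copy)
}
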
57
src/pkg/services/m365/api/lists_test.go
Normal file
@ -0,0 +1,57 @@
|
||||
package api_test

import (
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/tester/tconfig"
	"github.com/alcionai/corso/src/pkg/control/testdata"
)

type ListsAPIIntgSuite struct {
	tester.Suite
	its intgTesterSetup
}

func (suite *ListsAPIIntgSuite) SetupSuite() {
	suite.its = newIntegrationTesterSetup(suite.T())
}

func TestListsAPIIntgSuite(t *testing.T) {
	suite.Run(t, &ListsAPIIntgSuite{
		Suite: tester.NewIntegrationSuite(
			t,
			[][]string{tconfig.M365AcctCredEnvs}),
	})
}

func (suite *ListsAPIIntgSuite) TestLists_PostDrive() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	var (
		acl       = suite.its.ac.Lists()
		driveName = testdata.DefaultRestoreConfig("list_api_post_drive").Location
		siteID    = suite.its.siteID
	)

	// first post, should have no errors
	list, err := acl.PostDrive(ctx, siteID, driveName)
	require.NoError(t, err, clues.ToCore(err))
	// the list name cannot be set when posting, only its DisplayName,
	// so we double check here that we're still getting the name we expect.
	assert.Equal(t, driveName, ptr.Val(list.GetName()))

	// second post, same name, should error on name conflict
	_, err = acl.PostDrive(ctx, siteID, driveName)
	require.ErrorIs(t, err, graph.ErrItemAlreadyExistsConflict, clues.ToCore(err))
}
|
||||
@ -16,8 +16,6 @@ Below is a list of known Corso issues and limitations:
|
||||
from M365 while a backup creation is running.
|
||||
The next backup creation will correct any missing data.
|
||||
|
||||
* SharePoint document library data can't be restored after the library has been deleted.
|
||||
|
||||
* Sharing information of items in OneDrive/SharePoint using sharing links aren't backed up and restored.
|
||||
|
||||
* Permissions/Access given to a site group can't be restored.
|
||||
|
||||
30
website/package-lock.json
generated
@ -16,7 +16,7 @@
|
||||
"animate.css": "^4.1.1",
|
||||
"clsx": "^2.0.0",
|
||||
"docusaurus-plugin-image-zoom": "^1.0.1",
|
||||
"docusaurus-plugin-sass": "^0.2.4",
|
||||
"docusaurus-plugin-sass": "^0.2.5",
|
||||
"feather-icons": "^4.29.0",
|
||||
"jarallax": "^2.1.3",
|
||||
"mdx-mermaid": "^1.3.2",
|
||||
@ -24,7 +24,7 @@
|
||||
"prism-react-renderer": "^1.3.5",
|
||||
"react": "^17.0.2",
|
||||
"react-dom": "^17.0.2",
|
||||
"sass": "^1.63.6",
|
||||
"sass": "^1.64.0",
|
||||
"tiny-slider": "^2.9.4",
|
||||
"tw-elements": "^1.0.0-alpha13",
|
||||
"wow.js": "^1.2.2"
|
||||
@ -6541,14 +6541,14 @@
|
||||
}
|
||||
},
|
||||
"node_modules/docusaurus-plugin-sass": {
|
||||
"version": "0.2.4",
|
||||
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.4.tgz",
|
||||
"integrity": "sha512-r9bLXW6X2z64bzQUQZB1SxmNlGvSO9swTFALgiMjr/1O4FRDti6BseU4Sw2mlZkYvVQTq8cJMJIP6w7z/5We8Q==",
|
||||
"version": "0.2.5",
|
||||
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.5.tgz",
|
||||
"integrity": "sha512-Z+D0fLFUKcFpM+bqSUmqKIU+vO+YF1xoEQh5hoFreg2eMf722+siwXDD+sqtwU8E4MvVpuvsQfaHwODNlxJAEg==",
|
||||
"dependencies": {
|
||||
"sass-loader": "^10.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@docusaurus/core": "^2.0.0-beta",
|
||||
"@docusaurus/core": "^2.0.0-beta || ^3.0.0-alpha",
|
||||
"sass": "^1.30.0"
|
||||
}
|
||||
},
|
||||
@ -12571,9 +12571,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/sass": {
|
||||
"version": "1.63.6",
|
||||
"resolved": "https://registry.npmjs.org/sass/-/sass-1.63.6.tgz",
|
||||
"integrity": "sha512-MJuxGMHzaOW7ipp+1KdELtqKbfAWbH7OLIdoSMnVe3EXPMTmxTmlaZDCTsgIpPCs3w99lLo9/zDKkOrJuT5byw==",
|
||||
"version": "1.64.0",
|
||||
"resolved": "https://registry.npmjs.org/sass/-/sass-1.64.0.tgz",
|
||||
"integrity": "sha512-m7YtAGmQta9uANIUJwXesAJMSncqH+3INc8kdVXs6eV6GUC8Qu2IYKQSN8PRLgiQfpca697G94klm2leYMxSHw==",
|
||||
"dependencies": {
|
||||
"chokidar": ">=3.0.0 <4.0.0",
|
||||
"immutable": "^4.0.0",
|
||||
@ -19867,9 +19867,9 @@
|
||||
}
|
||||
},
|
||||
"docusaurus-plugin-sass": {
|
||||
"version": "0.2.4",
|
||||
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.4.tgz",
|
||||
"integrity": "sha512-r9bLXW6X2z64bzQUQZB1SxmNlGvSO9swTFALgiMjr/1O4FRDti6BseU4Sw2mlZkYvVQTq8cJMJIP6w7z/5We8Q==",
|
||||
"version": "0.2.5",
|
||||
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.5.tgz",
|
||||
"integrity": "sha512-Z+D0fLFUKcFpM+bqSUmqKIU+vO+YF1xoEQh5hoFreg2eMf722+siwXDD+sqtwU8E4MvVpuvsQfaHwODNlxJAEg==",
|
||||
"requires": {
|
||||
"sass-loader": "^10.1.1"
|
||||
}
|
||||
@ -23802,9 +23802,9 @@
|
||||
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
|
||||
},
|
||||
"sass": {
|
||||
"version": "1.63.6",
|
||||
"resolved": "https://registry.npmjs.org/sass/-/sass-1.63.6.tgz",
|
||||
"integrity": "sha512-MJuxGMHzaOW7ipp+1KdELtqKbfAWbH7OLIdoSMnVe3EXPMTmxTmlaZDCTsgIpPCs3w99lLo9/zDKkOrJuT5byw==",
|
||||
"version": "1.64.0",
|
||||
"resolved": "https://registry.npmjs.org/sass/-/sass-1.64.0.tgz",
|
||||
"integrity": "sha512-m7YtAGmQta9uANIUJwXesAJMSncqH+3INc8kdVXs6eV6GUC8Qu2IYKQSN8PRLgiQfpca697G94klm2leYMxSHw==",
|
||||
"requires": {
|
||||
"chokidar": ">=3.0.0 <4.0.0",
|
||||
"immutable": "^4.0.0",
|
||||
|
||||
@ -22,7 +22,7 @@
|
||||
"animate.css": "^4.1.1",
|
||||
"clsx": "^2.0.0",
|
||||
"docusaurus-plugin-image-zoom": "^1.0.1",
|
||||
"docusaurus-plugin-sass": "^0.2.4",
|
||||
"docusaurus-plugin-sass": "^0.2.5",
|
||||
"feather-icons": "^4.29.0",
|
||||
"jarallax": "^2.1.3",
|
||||
"mdx-mermaid": "^1.3.2",
|
||||
@ -30,7 +30,7 @@
|
||||
"prism-react-renderer": "^1.3.5",
|
||||
"react": "^17.0.2",
|
||||
"react-dom": "^17.0.2",
|
||||
"sass": "^1.63.6",
|
||||
"sass": "^1.64.0",
|
||||
"tiny-slider": "^2.9.4",
|
||||
"tw-elements": "^1.0.0-alpha13",
|
||||
"wow.js": "^1.2.2"
|
||||
|
||||