merge main

commit 0c69289b41

.github/workflows/auto-merge.yml (vendored, 22 changes)
@@ -1,3 +1,4 @@
+# See https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#common-dependabot-automations
 name: auto-merge

 on:
@@ -5,11 +6,24 @@ on:
     paths-ignore:
       - "src/**" # prevent auto-merge for go dependencies

 permissions:
   pull-requests: write

 jobs:
-  auto-merge:
+  auto-approve-label:
     runs-on: ubuntu-latest
     if: ${{ github.actor == 'dependabot[bot]' }}
     steps:
-      - uses: actions/checkout@v3
-      - uses: ahmadnassri/action-dependabot-auto-merge@v2 # https://github.com/marketplace/actions/dependabot-auto-merge
+      - name: Dependabot metadata
+        id: metadata
+        uses: dependabot/fetch-metadata@v1
         with:
-          github-token: ${{ secrets.DEPENDABOT_TOKEN }}
+          github-token: "${{ secrets.GITHUB_TOKEN }}"
+      - name: Enable auto-merge for Dependabot PRs
+        if: ${{steps.metadata.outputs.update-type == 'version-update:semver-minor'}}
+        run: |
+          gh pr edit "$PR_URL" --add-label "mergequeue"
+          gh pr review --approve "$PR_URL"
+        env:
+          PR_URL: ${{github.event.pull_request.html_url}}
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
@@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Fix Exchange folder cache population error when parent folder isn't found.
 - Fix Exchange backup issue caused by incorrect json serialization
 - Fix issues with details model containing duplicate entry for api consumers
+- Handle OLE conversion errors when trying to fetch attachments

 ### Changed
 - Do not display all the items that we restored at the end if there are more than 15. You can override this with `--verbose`.
@@ -118,7 +118,7 @@ issues:
       linters:
         - forbidigo
       text: "context.(Background|TODO)"
-    - path: internal/connector/graph/betasdk
+    - path: internal/m365/graph/betasdk
       linters:
         - wsl
         - revive
@@ -12,8 +12,8 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -279,7 +279,7 @@ func genericDeleteCommand(cmd *cobra.Command, bID, designation string, args []st

 	ctx := clues.Add(cmd.Context(), "delete_backup_id", bID)

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -300,7 +300,7 @@ func genericDeleteCommand(cmd *cobra.Command, bID, designation string, args []st
 func genericListCommand(cmd *cobra.Command, bID string, service path.ServiceType, args []string) error {
 	ctx := cmd.Context()

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -153,7 +153,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	r, acct, err := utils.GetAccountAndConnect(ctx)
+	r, acct, err := utils.AccountConnectAndWriteRepoConfig(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -262,7 +262,7 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
 	ctx := cmd.Context()
 	opts := utils.MakeExchangeOpts(cmd)

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -19,7 +19,7 @@ import (
 	"github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/connector/exchange"
+	"github.com/alcionai/corso/src/internal/m365/exchange"
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/account"
@@ -134,7 +134,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	r, acct, err := utils.GetAccountAndConnect(ctx)
+	r, acct, err := utils.AccountConnectAndWriteRepoConfig(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -220,7 +220,7 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
 	ctx := cmd.Context()
 	opts := utils.MakeOneDriveOpts(cmd)

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -150,7 +150,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	r, acct, err := utils.GetAccountAndConnect(ctx)
+	r, acct, err := utils.AccountConnectAndWriteRepoConfig(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -312,7 +312,7 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
 	ctx := cmd.Context()
 	opts := utils.MakeSharePointOpts(cmd)

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -121,7 +121,7 @@ func handleMaintenanceCmd(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return print.Only(ctx, err)
 	}
@@ -13,6 +13,7 @@ import (
 	"github.com/alcionai/corso/src/cli/options"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
+	"github.com/alcionai/corso/src/internal/events"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/storage"
@@ -193,7 +194,7 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {

 	repoID := cfg.RepoID
 	if len(repoID) == 0 {
-		repoID = "not_found"
+		repoID = events.RepoIDNotFound
 	}

 	s3Cfg, err := cfg.Storage.S3Config()
@@ -89,20 +89,20 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}

 	defer utils.CloseRepo(ctx, r)

-	dest := control.DefaultRestoreDestination(dttm.HumanReadable)
-	Infof(ctx, "Restoring to folder %s", dest.ContainerName)
+	restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadable)
+	Infof(ctx, "Restoring to folder %s", restoreCfg.Location)

 	sel := utils.IncludeExchangeRestoreDataSelectors(opts)
 	utils.FilterExchangeRestoreInfoSelectors(sel, opts)

-	ro, err := r.NewRestore(ctx, utils.BackupIDFV, sel.Selector, dest)
+	ro, err := r.NewRestore(ctx, utils.BackupIDFV, sel.Selector, restoreCfg)
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to initialize Exchange restore"))
 	}
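The `RestoreDestination` to `RestoreConfig` swap above repeats in the OneDrive and SharePoint restore commands below. A self-contained sketch of the shape the new API implies; the lowercase struct, the constructor, and the `Corso_Restore_` prefix are stand-ins for illustration, not corso's actual definitions:

```go
package main

import (
	"fmt"
	"time"
)

// restoreConfig mirrors the role control.RestoreConfig plays in the diff:
// the target folder moves from RestoreDestination.ContainerName to a
// Location field. Everything here is a stand-in, not corso's definition.
type restoreConfig struct {
	Location string
}

// defaultRestoreConfig imitates control.DefaultRestoreConfig: seed the
// restore location with a timestamp suffix. The "Corso_Restore_" prefix
// is an assumption made for this sketch.
func defaultRestoreConfig(timeFormat string) restoreConfig {
	return restoreConfig{
		Location: "Corso_Restore_" + time.Now().Format(timeFormat),
	}
}

func main() {
	// A Go reference-time layout stands in for dttm.HumanReadable here.
	restoreCfg := defaultRestoreConfig("02-Jan-2006_15-04-05")
	fmt.Println("Restoring to folder", restoreCfg.Location)
}
```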
@@ -14,7 +14,7 @@ import (
 	"github.com/alcionai/corso/src/cli/config"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/connector/exchange"
+	"github.com/alcionai/corso/src/internal/m365/exchange"
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/account"
@@ -90,20 +90,20 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}

 	defer utils.CloseRepo(ctx, r)

-	dest := control.DefaultRestoreDestination(dttm.HumanReadableDriveItem)
-	Infof(ctx, "Restoring to folder %s", dest.ContainerName)
+	restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)
+	Infof(ctx, "Restoring to folder %s", restoreCfg.Location)

 	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
 	utils.FilterOneDriveRestoreInfoSelectors(sel, opts)

-	ro, err := r.NewRestore(ctx, utils.BackupIDFV, sel.Selector, dest)
+	ro, err := r.NewRestore(ctx, utils.BackupIDFV, sel.Selector, restoreCfg)
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to initialize OneDrive restore"))
 	}
@@ -95,20 +95,20 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	r, _, err := utils.GetAccountAndConnect(ctx)
+	r, _, _, err := utils.GetAccountAndConnect(ctx)
 	if err != nil {
 		return Only(ctx, err)
 	}

 	defer utils.CloseRepo(ctx, r)

-	dest := control.DefaultRestoreDestination(dttm.HumanReadableDriveItem)
-	Infof(ctx, "Restoring to folder %s", dest.ContainerName)
+	restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)
+	Infof(ctx, "Restoring to folder %s", restoreCfg.Location)

 	sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
 	utils.FilterSharePointRestoreInfoSelectors(sel, opts)

-	ro, err := r.NewRestore(ctx, utils.BackupIDFV, sel.Selector, dest)
+	ro, err := r.NewRestore(ctx, utils.BackupIDFV, sel.Selector, restoreCfg)
 	if err != nil {
 		return Only(ctx, clues.Wrap(err, "Failed to initialize SharePoint restore"))
 	}
@@ -24,23 +24,53 @@ const (
 	Wildcard = "*"
 )

-func GetAccountAndConnect(ctx context.Context) (repository.Repository, *account.Account, error) {
+func GetAccountAndConnect(ctx context.Context) (repository.Repository, *storage.Storage, *account.Account, error) {
 	cfg, err := config.GetConfigRepoDetails(ctx, true, nil)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}

 	repoID := cfg.RepoID
 	if len(repoID) == 0 {
-		repoID = "not_found"
+		repoID = events.RepoIDNotFound
 	}

 	r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, repoID, options.Control())
 	if err != nil {
-		return nil, nil, clues.Wrap(err, "Failed to connect to the "+cfg.Storage.Provider.String()+" repository")
+		return nil, nil, nil, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository")
 	}

-	return r, &cfg.Account, nil
+	return r, &cfg.Storage, &cfg.Account, nil
 }

+func AccountConnectAndWriteRepoConfig(ctx context.Context) (repository.Repository, *account.Account, error) {
+	r, stg, acc, err := GetAccountAndConnect(ctx)
+	if err != nil {
+		logger.CtxErr(ctx, err).Info("getting and connecting account")
+		return nil, nil, err
+	}
+
+	s3Config, err := stg.S3Config()
+	if err != nil {
+		logger.CtxErr(ctx, err).Info("getting storage configuration")
+		return nil, nil, err
+	}
+
+	m365Config, err := acc.M365Config()
+	if err != nil {
+		logger.CtxErr(ctx, err).Info("getting m365 configuration")
+		return nil, nil, err
+	}
+
+	// repo config is already set during repo connect and init. This is just
+	// to confirm correct values, so we won't fail if the write fails.
+	err = config.WriteRepoConfig(ctx, s3Config, m365Config, r.GetID())
+	if err != nil {
+		logger.CtxErr(ctx, err).Info("writing to repository configuration")
+		return nil, nil, err
+	}
+
+	return r, acc, nil
+}
+
 // CloseRepo handles closing a repo.
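Most of the `r, _, _, err` hunks in this commit trace back to the widened signature above: `GetAccountAndConnect` now also returns the storage settings so `AccountConnectAndWriteRepoConfig` can re-confirm the repo config. A self-contained sketch of the call-site migration, with stub types standing in for corso's:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Stand-ins for corso's repository, storage, and account types; these
// stubs are assumptions made so the sketch compiles on its own.
type (
	Repository struct{}
	Storage    struct{}
	Account    struct{}
)

// getAccountAndConnect mirrors the new four-value signature of
// utils.GetAccountAndConnect after this commit.
func getAccountAndConnect(ctx context.Context) (*Repository, *Storage, *Account, error) {
	if ctx == nil {
		return nil, nil, nil, errors.New("nil context")
	}
	return &Repository{}, &Storage{}, &Account{}, nil
}

func main() {
	// Old call sites read three values: r, _, err := ...
	// After the change, one more blank identifier absorbs the storage value.
	r, _, _, err := getAccountAndConnect(context.Background())
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	fmt.Printf("connected: %#v\n", r)
}
```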
@@ -15,14 +15,18 @@ import (
 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/common/str"
-	"github.com/alcionai/corso/src/internal/connector"
-	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/m365"
+	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
+	odStub "github.com/alcionai/corso/src/internal/m365/onedrive/stub"
+	"github.com/alcionai/corso/src/internal/m365/resource"
+	m365Stub "github.com/alcionai/corso/src/internal/m365/stub"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/version"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/control/testdata"
 	"github.com/alcionai/corso/src/pkg/credentials"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -50,7 +54,7 @@ type dataBuilderFunc func(id, now, subject, body string) []byte

 func generateAndRestoreItems(
 	ctx context.Context,
-	gc *connector.GraphConnector,
+	ctrl *m365.Controller,
 	service path.ServiceType,
 	cat path.CategoryType,
 	sel selectors.Selector,
@@ -83,14 +87,14 @@ func generateAndRestoreItems(
 		items: items,
 	}}

-	dest := control.DefaultRestoreDestination(dttm.SafeForTesting)
-	dest.ContainerName = destFldr
-	print.Infof(ctx, "Restoring to folder %s", dest.ContainerName)
+	restoreCfg := control.DefaultRestoreConfig(dttm.SafeForTesting)
+	restoreCfg.Location = destFldr
+	print.Infof(ctx, "Restoring to folder %s", restoreCfg.Location)

 	dataColls, err := buildCollections(
 		service,
 		tenantID, userID,
-		dest,
+		restoreCfg,
 		collections)
 	if err != nil {
 		return nil, err
@@ -98,19 +102,19 @@ func generateAndRestoreItems(

 	print.Infof(ctx, "Generating %d %s items in %s\n", howMany, cat, Destination)

-	return gc.ConsumeRestoreCollections(ctx, version.Backup, sel, dest, opts, dataColls, errs)
+	return ctrl.ConsumeRestoreCollections(ctx, version.Backup, sel, restoreCfg, opts, dataColls, errs)
 }

 // ------------------------------------------------------------------------------------------
 // Common Helpers
 // ------------------------------------------------------------------------------------------

-func getGCAndVerifyResourceOwner(
+func getControllerAndVerifyResourceOwner(
 	ctx context.Context,
-	resource connector.Resource,
+	resourceCat resource.Category,
 	resourceOwner string,
 ) (
-	*connector.GraphConnector,
+	*m365.Controller,
 	account.Account,
 	idname.Provider,
 	error,
@@ -132,17 +136,17 @@ func getGCAndVerifyResourceOwner(
 		return nil, account.Account{}, nil, clues.Wrap(err, "finding m365 account details")
 	}

-	gc, err := connector.NewGraphConnector(ctx, acct, resource)
+	ctrl, err := m365.NewController(ctx, acct, resourceCat)
 	if err != nil {
 		return nil, account.Account{}, nil, clues.Wrap(err, "connecting to graph api")
 	}

-	id, _, err := gc.PopulateOwnerIDAndNamesFrom(ctx, resourceOwner, nil)
+	id, _, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, resourceOwner, nil)
 	if err != nil {
 		return nil, account.Account{}, nil, clues.Wrap(err, "verifying user")
 	}

-	return gc, acct, gc.IDNameLookup.ProviderForID(id), nil
+	return ctrl, acct, ctrl.IDNameLookup.ProviderForID(id), nil
 }

 type item struct {
@@ -163,7 +167,7 @@ type collection struct {
 func buildCollections(
 	service path.ServiceType,
 	tenant, user string,
-	dest control.RestoreDestination,
+	restoreCfg control.RestoreConfig,
 	colls []collection,
 ) ([]data.RestoreCollection, error) {
 	collections := make([]data.RestoreCollection, 0, len(colls))
@@ -208,7 +212,7 @@ var (
 )

 func generateAndRestoreDriveItems(
-	gc *connector.GraphConnector,
+	ctrl *m365.Controller,
 	resourceOwner, secondaryUserID, secondaryUserName string,
 	acct account.Account,
 	service path.ServiceType,
@@ -224,22 +228,22 @@ func generateAndRestoreDriveItems(
 	ctx, flush := tester.NewContext(nil)
 	defer flush()

-	dest := control.DefaultRestoreDestination(dttm.SafeForTesting)
-	dest.ContainerName = destFldr
-	print.Infof(ctx, "Restoring to folder %s", dest.ContainerName)
+	restoreCfg := control.DefaultRestoreConfig(dttm.SafeForTesting)
+	restoreCfg.Location = destFldr
+	print.Infof(ctx, "Restoring to folder %s", restoreCfg.Location)

 	var driveID string

 	switch service {
 	case path.SharePointService:
-		d, err := gc.AC.Stable.Client().Sites().BySiteId(resourceOwner).Drive().Get(ctx, nil)
+		d, err := ctrl.AC.Stable.Client().Sites().BySiteId(resourceOwner).Drive().Get(ctx, nil)
 		if err != nil {
 			return nil, clues.Wrap(err, "getting site's default drive")
 		}

 		driveID = ptr.Val(d.GetId())
 	default:
-		d, err := gc.AC.Stable.Client().Users().ByUserId(resourceOwner).Drive().Get(ctx, nil)
+		d, err := ctrl.AC.Stable.Client().Users().ByUserId(resourceOwner).Drive().Get(ctx, nil)
 		if err != nil {
 			return nil, clues.Wrap(err, "getting user's default drive")
 		}
@@ -248,7 +252,7 @@ func generateAndRestoreDriveItems(
 	}

 	var (
-		cols []connector.OnedriveColInfo
+		cols []odStub.ColInfo

 		rootPath    = []string{"drives", driveID, "root:"}
 		folderAPath = []string{"drives", driveID, "root:", folderAName}
@@ -262,15 +266,15 @@ func generateAndRestoreDriveItems(
 	)

 	for i := 0; i < count; i++ {
-		col := []connector.OnedriveColInfo{
+		col := []odStub.ColInfo{
 			// basic folder and file creation
 			{
 				PathElements: rootPath,
-				Files: []connector.ItemData{
+				Files: []odStub.ItemData{
 					{
 						Name: fmt.Sprintf("file-1st-count-%d-at-%s", i, currentTime),
 						Data: fileAData,
-						Perms: connector.PermData{
+						Perms: odStub.PermData{
 							User:     secondaryUserName,
 							EntityID: secondaryUserID,
 							Roles:    writePerm,
@@ -281,13 +285,13 @@ func generateAndRestoreDriveItems(
 						Data: fileBData,
 					},
 				},
-				Folders: []connector.ItemData{
+				Folders: []odStub.ItemData{
 					{
 						Name: folderBName,
 					},
 					{
 						Name: folderAName,
-						Perms: connector.PermData{
+						Perms: odStub.PermData{
 							User:     secondaryUserName,
 							EntityID: secondaryUserID,
 							Roles:    readPerm,
@@ -295,7 +299,7 @@ func generateAndRestoreDriveItems(
 					},
 					{
 						Name: folderCName,
-						Perms: connector.PermData{
+						Perms: odStub.PermData{
 							User:     secondaryUserName,
 							EntityID: secondaryUserID,
 							Roles:    readPerm,
@@ -307,18 +311,18 @@ func generateAndRestoreDriveItems(
 				// a folder that has permissions with an item in the folder with
 				// the different permissions.
 				PathElements: folderAPath,
-				Files: []connector.ItemData{
+				Files: []odStub.ItemData{
 					{
 						Name: fmt.Sprintf("file-count-%d-at-%s", i, currentTime),
 						Data: fileEData,
-						Perms: connector.PermData{
+						Perms: odStub.PermData{
 							User:     secondaryUserName,
 							EntityID: secondaryUserID,
 							Roles:    writePerm,
 						},
 					},
 				},
-				Perms: connector.PermData{
+				Perms: odStub.PermData{
 					User:     secondaryUserName,
 					EntityID: secondaryUserID,
 					Roles:    readPerm,
@@ -328,13 +332,13 @@ func generateAndRestoreDriveItems(
 				// a folder that has permissions with an item in the folder with
 				// no permissions.
 				PathElements: folderCPath,
-				Files: []connector.ItemData{
+				Files: []odStub.ItemData{
 					{
 						Name: fmt.Sprintf("file-count-%d-at-%s", i, currentTime),
 						Data: fileAData,
 					},
 				},
-				Perms: connector.PermData{
+				Perms: odStub.PermData{
 					User:     secondaryUserName,
 					EntityID: secondaryUserID,
 					Roles:    readPerm,
@@ -342,23 +346,23 @@ func generateAndRestoreDriveItems(
 			},
 			{
 				PathElements: folderBPath,
-				Files: []connector.ItemData{
+				Files: []odStub.ItemData{
 					{
 						// restoring a file in a non-root folder that doesn't inherit
 						// permissions.
 						Name: fmt.Sprintf("file-count-%d-at-%s", i, currentTime),
 						Data: fileBData,
-						Perms: connector.PermData{
+						Perms: odStub.PermData{
 							User:     secondaryUserName,
 							EntityID: secondaryUserID,
 							Roles:    writePerm,
 						},
 					},
 				},
-				Folders: []connector.ItemData{
+				Folders: []odStub.ItemData{
 					{
 						Name: folderAName,
-						Perms: connector.PermData{
+						Perms: odStub.PermData{
 							User:     secondaryUserName,
 							EntityID: secondaryUserID,
 							Roles:    readPerm,
@@ -371,7 +375,7 @@ func generateAndRestoreDriveItems(
 		cols = append(cols, col...)
 	}

-	input, err := connector.DataForInfo(service, cols, version.Backup)
+	input, err := odStub.DataForInfo(service, cols, version.Backup)
 	if err != nil {
 		return nil, err
 	}
@@ -388,16 +392,16 @@ func generateAndRestoreDriveItems(
 		ToggleFeatures: control.Toggles{},
 	}

-	config := connector.ConfigInfo{
+	config := m365Stub.ConfigInfo{
 		Opts:           opts,
-		Resource:       connector.Users,
+		Resource:       resource.Users,
 		Service:        service,
 		Tenant:         tenantID,
 		ResourceOwners: []string{resourceOwner},
-		Dest:           tester.DefaultTestRestoreDestination(""),
+		RestoreCfg:     testdata.DefaultRestoreConfig(""),
 	}

-	_, _, collections, _, err := connector.GetCollectionsAndExpected(
+	_, _, collections, _, err := m365Stub.GetCollectionsAndExpected(
 		config,
 		input,
 		version.Backup)
@@ -405,5 +409,5 @@ func generateAndRestoreDriveItems(
 		return nil, err
 	}

-	return gc.ConsumeRestoreCollections(ctx, version.Backup, sel, dest, opts, collections, errs)
+	return ctrl.ConsumeRestoreCollections(ctx, version.Backup, sel, restoreCfg, opts, collections, errs)
 }
@@ -5,8 +5,8 @@ import (

 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/connector"
-	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
+	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
+	"github.com/alcionai/corso/src/internal/m365/resource"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
@@ -52,14 +52,14 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	gc, _, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
+	ctrl, _, _, err := getControllerAndVerifyResourceOwner(ctx, resource.Users, User)
 	if err != nil {
 		return Only(ctx, err)
 	}

 	deets, err := generateAndRestoreItems(
 		ctx,
-		gc,
+		ctrl,
 		service,
 		category,
 		selectors.NewExchangeRestore([]string{User}).Selector,
@@ -98,14 +98,14 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
 		return nil
 	}

-	gc, _, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
+	ctrl, _, _, err := getControllerAndVerifyResourceOwner(ctx, resource.Users, User)
 	if err != nil {
 		return Only(ctx, err)
 	}

 	deets, err := generateAndRestoreItems(
 		ctx,
-		gc,
+		ctrl,
 		service,
 		category,
 		selectors.NewExchangeRestore([]string{User}).Selector,
@@ -143,14 +143,14 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	gc, _, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
+	ctrl, _, _, err := getControllerAndVerifyResourceOwner(ctx, resource.Users, User)
 	if err != nil {
 		return Only(ctx, err)
 	}

 	deets, err := generateAndRestoreItems(
 		ctx,
-		gc,
+		ctrl,
 		service,
 		category,
 		selectors.NewExchangeRestore([]string{User}).Selector,
@@ -7,7 +7,7 @@ import (

 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/connector"
+	"github.com/alcionai/corso/src/internal/m365/resource"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -36,7 +36,7 @@ func handleOneDriveFileFactory(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	gc, acct, inp, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
+	ctrl, acct, inp, err := getControllerAndVerifyResourceOwner(ctx, resource.Users, User)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -45,7 +45,7 @@ func handleOneDriveFileFactory(cmd *cobra.Command, args []string) error {
 	sel.SetDiscreteOwnerIDName(inp.ID(), inp.Name())

 	deets, err := generateAndRestoreDriveItems(
-		gc,
+		ctrl,
 		inp.ID(),
 		SecondaryUser,
 		strings.ToLower(SecondaryUser),
@@ -7,7 +7,7 @@ import (

 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/connector"
+	"github.com/alcionai/corso/src/internal/m365/resource"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -36,7 +36,7 @@ func handleSharePointLibraryFileFactory(cmd *cobra.Command, args []string) error
 		return nil
 	}

-	gc, acct, inp, err := getGCAndVerifyResourceOwner(ctx, connector.Sites, Site)
+	ctrl, acct, inp, err := getControllerAndVerifyResourceOwner(ctx, resource.Sites, Site)
 	if err != nil {
 		return Only(ctx, err)
 	}
@@ -45,7 +45,7 @@ func handleSharePointLibraryFileFactory(cmd *cobra.Command, args []string) error
 	sel.SetDiscreteOwnerIDName(inp.ID(), inp.Name())

 	deets, err := generateAndRestoreDriveItems(
-		gc,
+		ctrl,
 		inp.ID(),
 		SecondaryUser,
 		strings.ToLower(SecondaryUser),
@@ -21,7 +21,7 @@ import (
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/common/str"
-	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/credentials"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
@@ -17,7 +17,7 @@ import (

 	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/internal/common/ptr"
-	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/logger"
src/go.mod (24 changes)
@@ -1,14 +1,14 @@
 module github.com/alcionai/corso/src

-go 1.19
+go 1.20

-replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79
+replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230616023302-6c5412bbf417

 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
-	github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c
+	github.com/alcionai/clues v0.0.0-20230613181047-258ea4f19225
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.44.275
+	github.com/aws/aws-sdk-go v1.44.283
 	github.com/aws/aws-xray-sdk-go v1.8.1
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/google/uuid v1.3.0
@@ -18,7 +18,7 @@ require (
 	github.com/microsoft/kiota-authentication-azure-go v1.0.0
 	github.com/microsoft/kiota-http-go v1.0.0
 	github.com/microsoft/kiota-serialization-form-go v1.0.0
-	github.com/microsoft/kiota-serialization-json-go v1.0.1
+	github.com/microsoft/kiota-serialization-json-go v1.0.2
 	github.com/microsoftgraph/msgraph-sdk-go v1.4.0
 	github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
 	github.com/pkg/errors v0.9.1
@@ -34,7 +34,7 @@ require (
 	go.uber.org/zap v1.24.0
 	golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
 	golang.org/x/time v0.3.0
-	golang.org/x/tools v0.9.3
+	golang.org/x/tools v0.10.0
 )

 require (
@@ -115,12 +115,12 @@ require (
 	go.opentelemetry.io/otel/trace v1.15.1 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.9.0 // indirect
-	golang.org/x/mod v0.10.0 // indirect
-	golang.org/x/net v0.10.0
-	golang.org/x/sync v0.2.0 // indirect
-	golang.org/x/sys v0.8.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
+	golang.org/x/crypto v0.10.0 // indirect
+	golang.org/x/mod v0.11.0 // indirect
+	golang.org/x/net v0.11.0
+	golang.org/x/sync v0.3.0 // indirect
+	golang.org/x/sys v0.9.0 // indirect
+	golang.org/x/text v0.10.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.55.0 // indirect
 	google.golang.org/protobuf v1.30.0 // indirect
src/go.sum (44 changes)
@@ -53,10 +53,10 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c h1:Njdw/Nnq2DN3f8QMaHuZZHdVHTUSxFqPMMxDIInDWB4=
-github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c/go.mod h1:DeaMbAwDvYM6ZfPMR/GUl3hceqI5C8jIQ1lstjB2IW8=
-github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79 h1:Wrl99Y7jftZMnNDiOIcRJrjstZO3IEj3+Q/sip27vmI=
-github.com/alcionai/kopia v0.12.2-0.20230502235504-2509b1d72a79/go.mod h1:Iic7CcKhsq+A7MLR9hh6VJfgpcJhLx3Kn+BgjY+azvI=
+github.com/alcionai/clues v0.0.0-20230613181047-258ea4f19225 h1:mjUjCCGvJpmnLh3fuVzpfOSFC9lp9TOIOfjj51L5Rs0=
+github.com/alcionai/clues v0.0.0-20230613181047-258ea4f19225/go.mod h1:DeaMbAwDvYM6ZfPMR/GUl3hceqI5C8jIQ1lstjB2IW8=
+github.com/alcionai/kopia v0.12.2-0.20230616023302-6c5412bbf417 h1:s0B7Be1qqZW+XDRStDYvyXZ7ovQAMkg0N1F/ji4TJyc=
+github.com/alcionai/kopia v0.12.2-0.20230616023302-6c5412bbf417/go.mod h1:Iic7CcKhsq+A7MLR9hh6VJfgpcJhLx3Kn+BgjY+azvI=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.275 h1:VqRULgqrigvQLll4e4hXuc568EQAtZQ6jmBzLlQHzSI=
-github.com/aws/aws-sdk-go v1.44.275/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.283 h1:ObMaIvdhHJM2sIrbcljd7muHBaFb+Kp/QsX6iflGDg4=
+github.com/aws/aws-sdk-go v1.44.283/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
 github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -281,8 +281,8 @@ github.com/microsoft/kiota-http-go v1.0.0 h1:F1hd6gMlLeEgH2CkRB7z13ow7LxMKMWEmms
 github.com/microsoft/kiota-http-go v1.0.0/go.mod h1:eujxJliqodotsYepIc6ihhK+vXMMt5Q8YiSNL7+7M7U=
 github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI=
 github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA=
-github.com/microsoft/kiota-serialization-json-go v1.0.1 h1:nI3pLpqep7L6BLJPT7teCqkYFRmgyuA2G0zx6ZrwgFE=
-github.com/microsoft/kiota-serialization-json-go v1.0.1/go.mod h1:KS+eFtwtJGsosXRQr/Qilep7ZD1MRF+VtO7LnL7Oyuw=
+github.com/microsoft/kiota-serialization-json-go v1.0.2 h1:RXan8v7yWBD88XxVZ2W38BBcqu2UqWtgS54nCbOS5ow=
+github.com/microsoft/kiota-serialization-json-go v1.0.2/go.mod h1:AUItT9exyxmjZQE8IeFD9ygP77q9GKVb+AQE2V5Ikho=
 github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
 github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
 github.com/microsoftgraph/msgraph-sdk-go v1.4.0 h1:ibNwMDEZ6HikA9BVXu+TljCzCiE+yFsD6wLpJbTc1tc=
@@ -452,8 +452,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
-golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
+golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -490,8 +490,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
+golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -529,8 +529,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -551,8 +551,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -603,8 +603,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -617,8 +617,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
+golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -672,8 +672,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
-golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
+golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -41,7 +41,7 @@ const (

 	// M365dateTimeTimeZoneTimeFormat is the format used by M365 for datetimetimezone resource
 	// https://learn.microsoft.com/en-us/graph/api/resources/datetimetimezone?view=graph-rest-1.0
-	M365DateTimeTimeZone TimeFormat = "2006-01-02T15:04:05.000000"
+	M365DateTimeTimeZone TimeFormat = "2006-01-02T15:04:05.0000000"
 )

 // these regexes are used to extract time formats from strings. Their primary purpose is to
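The one-character layout change above is easy to miss: Go's `time` package treats a run of zeros after the decimal point as an exact fractional-digit count, so the old six-digit layout rejects the seven digits M365 emits for dateTimeTimeZone values. A runnable sketch (the sample timestamp is invented):

```go
package main

import (
	"fmt"
	"time"
)

// Layouts before and after the fix.
const (
	oldLayout = "2006-01-02T15:04:05.000000"  // 6 fractional digits
	newLayout = "2006-01-02T15:04:05.0000000" // 7 digits, matching M365
)

func main() {
	// A value in the shape Graph returns (the timestamp itself is made up).
	raw := "2023-06-16T02:33:02.1234567"

	if _, err := time.Parse(oldLayout, raw); err != nil {
		// Fails with: extra text: "7"
		fmt.Println("old layout:", err)
	}

	t, err := time.Parse(newLayout, raw)
	if err != nil {
		fmt.Println("new layout failed:", err)
		return
	}

	fmt.Println("new layout parsed:", t)
}
```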
@@ -1,317 +0,0 @@
-package exchange
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/alcionai/clues"
-
-	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
-	"github.com/alcionai/corso/src/internal/connector/graph"
-	"github.com/alcionai/corso/src/internal/connector/support"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/internal/observe"
-	"github.com/alcionai/corso/src/pkg/control"
-	"github.com/alcionai/corso/src/pkg/fault"
-	"github.com/alcionai/corso/src/pkg/logger"
-	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/selectors"
-	"github.com/alcionai/corso/src/pkg/services/m365/api"
-)
-
-// MetadataFileNames produces the category-specific set of filenames used to
-// store graph metadata such as delta tokens and folderID->path references.
-func MetadataFileNames(cat path.CategoryType) []string {
-	switch cat {
-	case path.EmailCategory, path.ContactsCategory:
-		return []string{graph.DeltaURLsFileName, graph.PreviousPathFileName}
-	default:
-		return []string{graph.PreviousPathFileName}
-	}
-}
-
-type CatDeltaPaths map[path.CategoryType]DeltaPaths
-
-type DeltaPaths map[string]DeltaPath
-
-func (dps DeltaPaths) AddDelta(k, d string) {
-	dp, ok := dps[k]
-	if !ok {
-		dp = DeltaPath{}
-	}
-
-	dp.Delta = d
-	dps[k] = dp
-}
-
-func (dps DeltaPaths) AddPath(k, p string) {
-	dp, ok := dps[k]
-	if !ok {
-		dp = DeltaPath{}
-	}
-
-	dp.Path = p
-	dps[k] = dp
-}
-
-type DeltaPath struct {
-	Delta string
-	Path  string
-}
-
-// ParseMetadataCollections produces a map of structs holding delta
-// and path lookup maps.
-func parseMetadataCollections(
-	ctx context.Context,
-	colls []data.RestoreCollection,
-) (CatDeltaPaths, bool, error) {
-	// cdp stores metadata
-	cdp := CatDeltaPaths{
-		path.ContactsCategory: {},
-		path.EmailCategory:    {},
-		path.EventsCategory:   {},
-	}
-
-	// found tracks the metadata we've loaded, to make sure we don't
-	// fetch overlapping copies.
-	found := map[path.CategoryType]map[string]struct{}{
-		path.ContactsCategory: {},
-		path.EmailCategory:    {},
-		path.EventsCategory:   {},
-	}
-
-	// errors from metadata items should not stop the backup,
-	// but it should prevent us from using previous backups
-	errs := fault.New(true)
-
-	for _, coll := range colls {
-		var (
-			breakLoop bool
-			items     = coll.Items(ctx, errs)
-			category  = coll.FullPath().Category()
-		)
-
-		for {
-			select {
-			case <-ctx.Done():
-				return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)
-
-			case item, ok := <-items:
-				if !ok || errs.Failure() != nil {
-					breakLoop = true
-					break
-				}
-
-				var (
-					m    = map[string]string{}
-					cdps = cdp[category]
-				)
-
-				err := json.NewDecoder(item.ToReader()).Decode(&m)
-				if err != nil {
-					return nil, false, clues.New("decoding metadata json").WithClues(ctx)
-				}
-
-				switch item.UUID() {
-				case graph.PreviousPathFileName:
-					if _, ok := found[category]["path"]; ok {
-						return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
-					}
-
-					for k, p := range m {
-						cdps.AddPath(k, p)
-					}
-
-					found[category]["path"] = struct{}{}
-
-				case graph.DeltaURLsFileName:
-					if _, ok := found[category]["delta"]; ok {
-						return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
-					}
-
-					for k, d := range m {
-						cdps.AddDelta(k, d)
-					}
-
-					found[category]["delta"] = struct{}{}
-				}
-
-				cdp[category] = cdps
-			}
-
-			if breakLoop {
-				break
-			}
-		}
-	}
-
-	if errs.Failure() != nil {
-		logger.CtxErr(ctx, errs.Failure()).Info("reading metadata collection items")
-
-		return CatDeltaPaths{
-			path.ContactsCategory: {},
-			path.EmailCategory:    {},
-			path.EventsCategory:   {},
-		}, false, nil
-	}
-
-	// Remove any entries that contain a path or a delta, but not both.
-	// That metadata is considered incomplete, and needs to incur a
-	// complete backup on the next run.
-	for _, dps := range cdp {
-		for k, dp := range dps {
-			if len(dp.Path) == 0 {
-				delete(dps, k)
-			}
-		}
-	}
-
-	return cdp, true, nil
-}
-
-// DataCollections returns a DataCollection which the caller can
-// use to read mailbox data out for the specified user
-func DataCollections(
-	ctx context.Context,
-	ac api.Client,
-	selector selectors.Selector,
-	tenantID string,
-	user idname.Provider,
-	metadata []data.RestoreCollection,
-	su support.StatusUpdater,
-	ctrlOpts control.Options,
-	errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
-	eb, err := selector.ToExchangeBackup()
-	if err != nil {
-		return nil, nil, false, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
-	}
-
-	var (
-		collections = []data.BackupCollection{}
-		el          = errs.Local()
-		categories  = map[path.CategoryType]struct{}{}
-		handlers    = BackupHandlers(ac)
-	)
-
-	// Turn on concurrency limiter middleware for exchange backups
-	// unless explicitly disabled through DisableConcurrencyLimiterFN cli flag
-	if !ctrlOpts.ToggleFeatures.DisableConcurrencyLimiter {
-		graph.InitializeConcurrencyLimiter(ctrlOpts.Parallelism.ItemFetch)
-	}
-
-	cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, metadata)
-	if err != nil {
-		return nil, nil, false, err
-	}
-
-	for _, scope := range eb.Scopes() {
-		if el.Failure() != nil {
-			break
-		}
-
-		dcs, err := createCollections(
-			ctx,
-			handlers,
-			tenantID,
-			user,
-			scope,
-			cdps[scope.Category().PathType()],
-			ctrlOpts,
-			su,
-			errs)
-		if err != nil {
-			el.AddRecoverable(err)
-			continue
-		}
-
-		categories[scope.Category().PathType()] = struct{}{}
-
-		collections = append(collections, dcs...)
-	}
-
-	if len(collections) > 0 {
-		baseCols, err := graph.BaseCollections(
-			ctx,
-			collections,
-			tenantID,
-			user.ID(),
-			path.ExchangeService,
-			categories,
-			su,
-			errs)
-		if err != nil {
-			return nil, nil, false, err
-		}
-
-		collections = append(collections, baseCols...)
-	}
-
-	return collections, nil, canUsePreviousBackup, el.Failure()
-}
-
-// createCollections - utility function that retrieves M365
-// IDs through Microsoft Graph API. The selectors.ExchangeScope
-// determines the type of collections that are retrieved.
-func createCollections(
-	ctx context.Context,
-	handlers map[path.CategoryType]backupHandler,
-	tenantID string,
-	user idname.Provider,
-	scope selectors.ExchangeScope,
-	dps DeltaPaths,
-	ctrlOpts control.Options,
-	su support.StatusUpdater,
-	errs *fault.Bus,
-) ([]data.BackupCollection, error) {
-	ctx = clues.Add(ctx, "category", scope.Category().PathType())
-
-	var (
-		allCollections = make([]data.BackupCollection, 0)
-		category       = scope.Category().PathType()
-		qp             = graph.QueryParams{
-			Category:      category,
-			ResourceOwner: user,
-			TenantID:      tenantID,
-		}
-	)
-
-	handler, ok := handlers[category]
-	if !ok {
-		return nil, clues.New("unsupported backup category type").WithClues(ctx)
-	}
-
-	foldersComplete := observe.MessageWithCompletion(
-		ctx,
-		observe.Bulletf("%s", qp.Category))
-	defer close(foldersComplete)
-
-	rootFolder, cc := handler.NewContainerCache(user.ID())
-
-	if err := cc.Populate(ctx, errs, rootFolder); err != nil {
-		return nil, clues.Wrap(err, "populating container cache")
-	}
-
-	collections, err := filterContainersAndFillCollections(
-		ctx,
-		qp,
-		handler,
-		su,
-		cc,
-		scope,
-		dps,
-		ctrlOpts,
-		errs)
-	if err != nil {
-		return nil, clues.Wrap(err, "filling collections")
-	}
-
-	foldersComplete <- struct{}{}
-
-	for _, coll := range collections {
-		allCollections = append(allCollections, coll)
-	}
-
-	return allCollections, nil
-}
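The file removed above (its logic presumably moves under `internal/m365`, judging by the import renames elsewhere in this commit) centers on the `DeltaPath` bookkeeping. A self-contained sketch of that behavior, with the small types re-declared locally so it runs on its own:

```go
package main

import "fmt"

// Re-declarations of the removed file's bookkeeping types, copied here so
// the sketch compiles without the corso tree.
type DeltaPath struct {
	Delta string
	Path  string
}

type DeltaPaths map[string]DeltaPath

func (dps DeltaPaths) AddDelta(k, d string) {
	dp := dps[k] // zero value if absent, matching the original's if !ok branch
	dp.Delta = d
	dps[k] = dp
}

func (dps DeltaPaths) AddPath(k, p string) {
	dp := dps[k]
	dp.Path = p
	dps[k] = dp
}

func main() {
	dps := DeltaPaths{}
	dps.AddDelta("folder-1", "delta-link")
	dps.AddPath("folder-1", "prev-path")
	dps.AddDelta("folder-2", "delta-only") // incomplete: no previous path

	// Mirror of the pruning pass at the end of parseMetadataCollections:
	// entries missing a previous path are incomplete and get dropped,
	// which forces a full backup for those containers on the next run.
	for k, dp := range dps {
		if len(dp.Path) == 0 {
			delete(dps, k)
		}
	}

	fmt.Println(dps) // map[folder-1:{delta-link prev-path}]
}
```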
@ -1,761 +0,0 @@
|
||||
package exchange
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Unit tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type DataCollectionsUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestDataCollectionsUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &DataCollectionsUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
|
||||
type fileValues struct {
|
||||
fileName string
|
||||
value string
|
||||
}
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
data []fileValues
|
||||
expect map[string]DeltaPath
|
||||
canUsePreviousBackup bool
|
||||
expectError assert.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "delta urls only",
|
||||
data: []fileValues{
|
||||
{graph.DeltaURLsFileName, "delta-link"},
|
||||
},
|
||||
expect: map[string]DeltaPath{},
|
||||
canUsePreviousBackup: true,
|
||||
expectError: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "multiple delta urls",
|
||||
data: []fileValues{
|
||||
{graph.DeltaURLsFileName, "delta-link"},
|
||||
{graph.DeltaURLsFileName, "delta-link-2"},
|
||||
},
|
||||
canUsePreviousBackup: false,
|
||||
expectError: assert.Error,
|
||||
},
|
||||
{
|
||||
name: "previous path only",
|
||||
data: []fileValues{
|
||||
{graph.PreviousPathFileName, "prev-path"},
|
||||
},
|
||||
expect: map[string]DeltaPath{
|
||||
"key": {
|
||||
Delta: "delta-link",
|
||||
Path: "prev-path",
|
||||
},
|
||||
},
|
||||
canUsePreviousBackup: true,
|
||||
expectError: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "multiple previous paths",
|
||||
data: []fileValues{
|
||||
{graph.PreviousPathFileName, "prev-path"},
|
||||
{graph.PreviousPathFileName, "prev-path-2"},
|
||||
},
|
||||
canUsePreviousBackup: false,
|
||||
expectError: assert.Error,
|
||||
},
|
||||
{
|
||||
name: "delta urls and previous paths",
|
||||
data: []fileValues{
|
||||
{graph.DeltaURLsFileName, "delta-link"},
|
||||
{graph.PreviousPathFileName, "prev-path"},
|
||||
},
|
||||
expect: map[string]DeltaPath{
|
||||
"key": {
|
||||
Delta: "delta-link",
|
||||
Path: "prev-path",
|
||||
},
|
||||
},
|
||||
canUsePreviousBackup: true,
|
||||
expectError: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "delta urls and empty previous paths",
|
||||
data: []fileValues{
|
||||
{graph.DeltaURLsFileName, "delta-link"},
|
||||
{graph.PreviousPathFileName, ""},
|
||||
},
|
||||
expect: map[string]DeltaPath{},
|
||||
canUsePreviousBackup: true,
|
||||
expectError: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "empty delta urls and previous paths",
|
||||
data: []fileValues{
|
||||
{graph.DeltaURLsFileName, ""},
|
||||
{graph.PreviousPathFileName, "prev-path"},
|
||||
},
|
||||
expect: map[string]DeltaPath{
|
||||
"key": {
|
||||
Delta: "delta-link",
|
||||
Path: "prev-path",
|
||||
},
|
||||
},
|
||||
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "delta urls with special chars",
			data: []fileValues{
				{graph.DeltaURLsFileName, "`!@#$%^&*()_[]{}/\"\\"},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "`!@#$%^&*()_[]{}/\"\\",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "delta urls with escaped chars",
			data: []fileValues{
				{graph.DeltaURLsFileName, `\n\r\t\b\f\v\0\\`},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "\\n\\r\\t\\b\\f\\v\\0\\\\",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "delta urls with newline char runes",
			data: []fileValues{
				// rune(92) = \, rune(110) = n. Ensuring it's not possible to
				// error in serializing/deserializing and produce a single newline
				// character from those two runes.
				{graph.DeltaURLsFileName, string([]rune{rune(92), rune(110)})},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "\\n",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			entries := []graph.MetadataCollectionEntry{}

			for _, d := range test.data {
				entries = append(
					entries,
					graph.NewMetadataEntry(d.fileName, map[string]string{"key": d.value}))
			}

			coll, err := graph.MakeMetadataCollection(
				"t", "u",
				path.ExchangeService,
				path.EmailCategory,
				entries,
				func(cos *support.ConnectorOperationStatus) {},
			)
			require.NoError(t, err, clues.ToCore(err))

			cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
				data.NoFetchRestoreCollection{Collection: coll},
			})
			test.expectError(t, err, clues.ToCore(err))

			assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")

			emails := cdps[path.EmailCategory]

			assert.Len(t, emails, len(test.expect))

			for k, v := range emails {
				// Compare against the expected values; comparing emails
				// against itself would make these assertions vacuous.
				assert.Equal(t, test.expect[k].Delta, v.Delta, "delta")
				assert.Equal(t, test.expect[k].Path, v.Path, "path")
			}
		})
	}
}
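
The table above encodes a single pairing rule between delta tokens and previous paths. Since parseMetadataCollections itself isn't part of this diff, here is a minimal, self-contained sketch of that rule as the test expects it; the pair helper and its name are hypothetical, not the real implementation:

package main

import "fmt"

// DeltaPath mirrors the pair asserted in the test: a delta token plus the
// previous folder path for one container key.
type DeltaPath struct {
	Delta string
	Path  string
}

// pair is a hypothetical reduction of the rule the table exercises: an entry
// survives only when a non-empty previous path exists; the delta token is
// optional and carried through as-is (possibly empty).
func pair(deltas, paths map[string]string) map[string]DeltaPath {
	out := map[string]DeltaPath{}

	for k, p := range paths {
		if len(p) == 0 {
			continue // empty previous paths are dropped, per the test cases
		}

		out[k] = DeltaPath{Delta: deltas[k], Path: p}
	}

	return out
}

func main() {
	got := pair(
		map[string]string{"key": ""},          // empty delta url
		map[string]string{"key": "prev-path"}, // valid previous path
	)
	fmt.Printf("%+v\n", got) // map[key:{Delta: Path:prev-path}]
}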

type failingColl struct {
	t *testing.T
}

func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
	ic := make(chan data.Stream)
	defer close(ic)

	errs.AddRecoverable(assert.AnError)

	return ic
}

func (f failingColl) FullPath() path.Path {
	tmp, err := path.Build(
		"tenant",
		"user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"inbox")
	require.NoError(f.t, err, clues.ToCore(err))

	return tmp
}

func (f failingColl) FetchItemByName(context.Context, string) (data.Stream, error) {
	// no fetch calls will be made
	return nil, nil
}

// This check is to ensure that we don't error out, but still return
// canUsePreviousBackup as false on read errors
func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections_ReadFailure() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	fc := failingColl{t}

	_, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{fc})
	require.NoError(t, err)
	require.False(t, canUsePreviousBackup)
}

// ---------------------------------------------------------------------------
// Integration tests
// ---------------------------------------------------------------------------

func newStatusUpdater(t *testing.T, wg *sync.WaitGroup) func(status *support.ConnectorOperationStatus) {
	updater := func(status *support.ConnectorOperationStatus) {
		defer wg.Done()
	}

	return updater
}

type DataCollectionsIntegrationSuite struct {
	tester.Suite
	user     string
	site     string
	tenantID string
	ac       api.Client
}

func TestDataCollectionsIntegrationSuite(t *testing.T) {
	suite.Run(t, &DataCollectionsIntegrationSuite{
		Suite: tester.NewIntegrationSuite(
			t,
			[][]string{tester.M365AcctCredEnvs},
		),
	})
}

func (suite *DataCollectionsIntegrationSuite) SetupSuite() {
	suite.user = tester.M365UserID(suite.T())
	suite.site = tester.M365SiteID(suite.T())

	acct := tester.NewM365Account(suite.T())
	creds, err := acct.M365Config()
	require.NoError(suite.T(), err, clues.ToCore(err))

	suite.ac, err = api.NewClient(creds)
	require.NoError(suite.T(), err, clues.ToCore(err))

	suite.tenantID = creds.AzureTenantID

	tester.LogTimeOfTest(suite.T())
}

func (suite *DataCollectionsIntegrationSuite) TestMailFetch() {
	var (
		userID   = tester.M365UserID(suite.T())
		users    = []string{userID}
		handlers = BackupHandlers(suite.ac)
	)

	tests := []struct {
		name                string
		scope               selectors.ExchangeScope
		folderNames         map[string]struct{}
		canMakeDeltaQueries bool
	}{
		{
			name: "Folder Iterative Check Mail",
			scope: selectors.NewExchangeBackup(users).MailFolders(
				[]string{DefaultMailFolder},
				selectors.PrefixMatch(),
			)[0],
			folderNames: map[string]struct{}{
				DefaultMailFolder: {},
			},
			canMakeDeltaQueries: true,
		},
		{
			name: "Folder Iterative Check Mail Non-Delta",
			scope: selectors.NewExchangeBackup(users).MailFolders(
				[]string{DefaultMailFolder},
				selectors.PrefixMatch(),
			)[0],
			folderNames: map[string]struct{}{
				DefaultMailFolder: {},
			},
			canMakeDeltaQueries: false,
		},
	}

	for _, test := range tests {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			ctrlOpts := control.Defaults()
			ctrlOpts.ToggleFeatures.DisableDelta = !test.canMakeDeltaQueries

			collections, err := createCollections(
				ctx,
				handlers,
				suite.tenantID,
				inMock.NewProvider(userID, userID),
				test.scope,
				DeltaPaths{},
				ctrlOpts,
				func(status *support.ConnectorOperationStatus) {},
				fault.New(true))
			require.NoError(t, err, clues.ToCore(err))

			for _, c := range collections {
				if c.FullPath().Service() == path.ExchangeMetadataService {
					continue
				}

				require.NotEmpty(t, c.FullPath().Folder(false))

				// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
				// interface.
				if !assert.Implements(t, (*data.LocationPather)(nil), c) {
					continue
				}

				loc := c.(data.LocationPather).LocationPath().String()

				require.NotEmpty(t, loc)

				delete(test.folderNames, loc)
			}

			assert.Empty(t, test.folderNames)
		})
	}
}

func (suite *DataCollectionsIntegrationSuite) TestDelta() {
	var (
		userID   = tester.M365UserID(suite.T())
		users    = []string{userID}
		handlers = BackupHandlers(suite.ac)
	)

	tests := []struct {
		name  string
		scope selectors.ExchangeScope
	}{
		{
			name: "Mail",
			scope: selectors.NewExchangeBackup(users).MailFolders(
				[]string{DefaultMailFolder},
				selectors.PrefixMatch(),
			)[0],
		},
		{
			name: "Contacts",
			scope: selectors.NewExchangeBackup(users).ContactFolders(
				[]string{DefaultContactFolder},
				selectors.PrefixMatch(),
			)[0],
		},
		{
			name: "Events",
			scope: selectors.NewExchangeBackup(users).EventCalendars(
				[]string{DefaultCalendar},
				selectors.PrefixMatch(),
			)[0],
		},
	}
	for _, test := range tests {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			// get collections without providing any delta history (ie: full backup)
			collections, err := createCollections(
				ctx,
				handlers,
				suite.tenantID,
				inMock.NewProvider(userID, userID),
				test.scope,
				DeltaPaths{},
				control.Defaults(),
				func(status *support.ConnectorOperationStatus) {},
				fault.New(true))
			require.NoError(t, err, clues.ToCore(err))
			assert.Less(t, 1, len(collections), "retrieved metadata and data collections")

			var metadata data.BackupCollection

			for _, coll := range collections {
				if coll.FullPath().Service() == path.ExchangeMetadataService {
					metadata = coll
				}
			}

			require.NotNil(t, metadata, "collections contains a metadata collection")

			cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
				data.NoFetchRestoreCollection{Collection: metadata},
			})
			require.NoError(t, err, clues.ToCore(err))
			assert.True(t, canUsePreviousBackup, "can use previous backup")

			dps := cdps[test.scope.Category().PathType()]

			// now do another backup with the previous delta tokens,
			// which should only contain the difference.
			collections, err = createCollections(
				ctx,
				handlers,
				suite.tenantID,
				inMock.NewProvider(userID, userID),
				test.scope,
				dps,
				control.Defaults(),
				func(status *support.ConnectorOperationStatus) {},
				fault.New(true))
			require.NoError(t, err, clues.ToCore(err))

			// TODO(keepers): this isn't a very useful test at the moment. It needs to
			// investigate the items in the original and delta collections to at least
			// assert some minimum assumptions, such as "deltas should retrieve fewer items".
			// Delta usage is commented out at the moment, anyway. So this is currently
			// a sanity check that the minimum behavior won't break.
			for _, coll := range collections {
				if coll.FullPath().Service() != path.ExchangeMetadataService {
					ec, ok := coll.(*Collection)
					require.True(t, ok, "collection is *Collection")
					assert.NotNil(t, ec)
				}
			}
		})
	}
}

// TestMailSerializationRegression verifies that all mail data stored in the
// test account can be successfully downloaded into bytes and restored into
// M365 mail objects
func (suite *DataCollectionsIntegrationSuite) TestMailSerializationRegression() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	var (
		wg       sync.WaitGroup
		users    = []string{suite.user}
		handlers = BackupHandlers(suite.ac)
	)

	sel := selectors.NewExchangeBackup(users)
	sel.Include(sel.MailFolders([]string{DefaultMailFolder}, selectors.PrefixMatch()))

	collections, err := createCollections(
		ctx,
		handlers,
		suite.tenantID,
		inMock.NewProvider(suite.user, suite.user),
		sel.Scopes()[0],
		DeltaPaths{},
		control.Defaults(),
		newStatusUpdater(t, &wg),
		fault.New(true))
	require.NoError(t, err, clues.ToCore(err))

	wg.Add(len(collections))

	for _, edc := range collections {
		suite.Run(edc.FullPath().String(), func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			isMetadata := edc.FullPath().Service() == path.ExchangeMetadataService
			streamChannel := edc.Items(ctx, fault.New(true))

			// Verify that each message can be restored
			for stream := range streamChannel {
				buf := &bytes.Buffer{}

				read, err := buf.ReadFrom(stream.ToReader())
				assert.NoError(t, err, clues.ToCore(err))
				assert.NotZero(t, read)

				if isMetadata {
					continue
				}

				message, err := api.BytesToMessageable(buf.Bytes())
				assert.NotNil(t, message)
				assert.NoError(t, err, clues.ToCore(err))
			}
		})
	}

	wg.Wait()
}

// TestContactSerializationRegression verifies the ability to query contact
// items and to store contacts within a Collection. Downloaded contacts are run
// through a regression test to ensure that downloaded items can be uploaded.
func (suite *DataCollectionsIntegrationSuite) TestContactSerializationRegression() {
	var (
		users    = []string{suite.user}
		handlers = BackupHandlers(suite.ac)
	)

	tests := []struct {
		name  string
		scope selectors.ExchangeScope
	}{
		{
			name: "Default Contact Folder",
			scope: selectors.NewExchangeBackup(users).ContactFolders(
				[]string{DefaultContactFolder},
				selectors.PrefixMatch())[0],
		},
	}

	for _, test := range tests {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			var wg sync.WaitGroup

			edcs, err := createCollections(
				ctx,
				handlers,
				suite.tenantID,
				inMock.NewProvider(suite.user, suite.user),
				test.scope,
				DeltaPaths{},
				control.Defaults(),
				newStatusUpdater(t, &wg),
				fault.New(true))
			require.NoError(t, err, clues.ToCore(err))

			wg.Add(len(edcs))

			require.GreaterOrEqual(t, len(edcs), 1, "expected 1 <= num collections <= 2")
			require.GreaterOrEqual(t, 2, len(edcs), "expected 1 <= num collections <= 2")

			for _, edc := range edcs {
				isMetadata := edc.FullPath().Service() == path.ExchangeMetadataService
				count := 0

				for stream := range edc.Items(ctx, fault.New(true)) {
					buf := &bytes.Buffer{}
					read, err := buf.ReadFrom(stream.ToReader())
					assert.NoError(t, err, clues.ToCore(err))
					assert.NotZero(t, read)

					if isMetadata {
						continue
					}

					contact, err := api.BytesToContactable(buf.Bytes())
					assert.NotNil(t, contact)
					assert.NoError(t, err, "converting contact bytes: "+buf.String(), clues.ToCore(err))
					count++
				}

				if isMetadata {
					continue
				}

				// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
				// interface.
				if !assert.Implements(t, (*data.LocationPather)(nil), edc) {
					continue
				}

				assert.Equal(
					t,
					edc.(data.LocationPather).LocationPath().String(),
					DefaultContactFolder)
				assert.NotZero(t, count)
			}

			wg.Wait()
		})
	}
}

// TestEventsSerializationRegression ensures that createCollections can
// successfully query, download, and restore event objects.
func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	var (
		users    = []string{suite.user}
		handlers = BackupHandlers(suite.ac)
		calID    string
		bdayID   string
	)

	fn := func(gcf graph.CachedContainer) error {
		if ptr.Val(gcf.GetDisplayName()) == DefaultCalendar {
			calID = ptr.Val(gcf.GetId())
		}

		if ptr.Val(gcf.GetDisplayName()) == "Birthdays" {
			bdayID = ptr.Val(gcf.GetId())
		}

		return nil
	}

	err := suite.ac.Events().EnumerateContainers(ctx, suite.user, DefaultCalendar, fn, fault.New(true))
	require.NoError(t, err, clues.ToCore(err))

	tests := []struct {
		name, expected string
		scope          selectors.ExchangeScope
	}{
		{
			name:     "Default Event Calendar",
			expected: calID,
			scope: selectors.NewExchangeBackup(users).EventCalendars(
				[]string{DefaultCalendar},
				selectors.PrefixMatch(),
			)[0],
		},
		{
			name:     "Birthday Calendar",
			expected: bdayID,
			scope: selectors.NewExchangeBackup(users).EventCalendars(
				[]string{"Birthdays"},
				selectors.PrefixMatch(),
			)[0],
		},
	}

	for _, test := range tests {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			var wg sync.WaitGroup

			collections, err := createCollections(
				ctx,
				handlers,
				suite.tenantID,
				inMock.NewProvider(suite.user, suite.user),
				test.scope,
				DeltaPaths{},
				control.Defaults(),
				newStatusUpdater(t, &wg),
				fault.New(true))
			require.NoError(t, err, clues.ToCore(err))
			require.Len(t, collections, 2)

			wg.Add(len(collections))

			for _, edc := range collections {
				var isMetadata bool

				if edc.FullPath().Service() != path.ExchangeMetadataService {
					assert.Equal(t, test.expected, edc.FullPath().Folder(false))
				} else {
					// Only the metadata collection lives in the metadata
					// service; flag it so its items skip event deserialization.
					isMetadata = true

					assert.Equal(t, "", edc.FullPath().Folder(false))
				}

				for item := range edc.Items(ctx, fault.New(true)) {
					buf := &bytes.Buffer{}

					read, err := buf.ReadFrom(item.ToReader())
					assert.NoError(t, err, clues.ToCore(err))
					assert.NotZero(t, read)

					if isMetadata {
						continue
					}

					event, err := api.BytesToEventable(buf.Bytes())
					assert.NotNil(t, event)
					assert.NoError(t, err, "creating event from bytes: "+buf.String(), clues.ToCore(err))
				}
			}

			wg.Wait()
		})
	}
}
@ -1,129 +0,0 @@
package exchange

import (
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type CacheResolverSuite struct {
	tester.Suite
	credentials account.M365Config
}

func TestCacheResolverIntegrationSuite(t *testing.T) {
	suite.Run(t, &CacheResolverSuite{
		Suite: tester.NewIntegrationSuite(
			t,
			[][]string{tester.M365AcctCredEnvs},
		),
	})
}

func (suite *CacheResolverSuite) SetupSuite() {
	t := suite.T()

	a := tester.NewM365Account(t)
	m365, err := a.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	suite.credentials = m365
}

func (suite *CacheResolverSuite) TestPopulate() {
	ac, err := api.NewClient(suite.credentials)
	require.NoError(suite.T(), err, clues.ToCore(err))

	eventFunc := func(t *testing.T) graph.ContainerResolver {
		return &eventCalendarCache{
			userID: tester.M365UserID(t),
			enumer: ac.Events(),
			getter: ac.Events(),
		}
	}

	contactFunc := func(t *testing.T) graph.ContainerResolver {
		return &contactFolderCache{
			userID: tester.M365UserID(t),
			enumer: ac.Contacts(),
			getter: ac.Contacts(),
		}
	}

	tests := []struct {
		name, folderInCache, root, basePath string
		resolverFunc                        func(t *testing.T) graph.ContainerResolver
		canFind                             assert.BoolAssertionFunc
	}{
		{
			name: "Default Event Cache",
			// Fine as long as this isn't running against a migrated Exchange server.
			folderInCache: DefaultCalendar,
			root:          DefaultCalendar,
			basePath:      DefaultCalendar,
			resolverFunc:  eventFunc,
			canFind:       assert.True,
		},
		{
			name:          "Default Event Folder Hidden",
			folderInCache: DefaultContactFolder,
			root:          DefaultCalendar,
			canFind:       assert.False,
			resolverFunc:  eventFunc,
		},
		{
			name:          "Name Not in Cache",
			folderInCache: "testFooBarWhoBar",
			root:          DefaultCalendar,
			canFind:       assert.False,
			resolverFunc:  eventFunc,
		},
		{
			name:          "Default Contact Cache",
			folderInCache: DefaultContactFolder,
			root:          DefaultContactFolder,
			basePath:      DefaultContactFolder,
			canFind:       assert.True,
			resolverFunc:  contactFunc,
		},
		{
			name:          "Default Contact Hidden",
			folderInCache: DefaultContactFolder,
			root:          DefaultContactFolder,
			canFind:       assert.False,
			resolverFunc:  contactFunc,
		},
		{
			name:          "Name Not in Cache",
			folderInCache: "testFooBarWhoBar",
			root:          DefaultContactFolder,
			canFind:       assert.False,
			resolverFunc:  contactFunc,
		},
	}
	for _, test := range tests {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			resolver := test.resolverFunc(t)

			err := resolver.Populate(ctx, fault.New(true), test.root, test.basePath)
			require.NoError(t, err, clues.ToCore(err))

			_, isFound := resolver.LocationInCache(test.folderInCache)
			test.canFind(t, isFound, "folder path", test.folderInCache)
		})
	}
}
@ -1,47 +0,0 @@
package exchange

import (
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type ExchangeIteratorSuite struct {
	tester.Suite
}

func TestExchangeIteratorSuite(t *testing.T) {
	suite.Run(t, &ExchangeIteratorSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ExchangeIteratorSuite) TestDisplayable() {
	t := suite.T()
	bytes := exchMock.ContactBytes("Displayable")
	contact, err := api.BytesToContactable(bytes)
	require.NoError(t, err, clues.ToCore(err))

	aDisplayable, ok := contact.(graph.Displayable)
	assert.True(t, ok)
	assert.NotNil(t, aDisplayable.GetId())
	assert.NotNil(t, aDisplayable.GetDisplayName())
}

func (suite *ExchangeIteratorSuite) TestDescendable() {
	t := suite.T()
	bytes := exchMock.MessageBytes("Descendable")
	message, err := api.BytesToMessageable(bytes)
	require.NoError(t, err, clues.ToCore(err))

	aDescendable, ok := message.(graph.Descendable)
	assert.True(t, ok)
	assert.NotNil(t, aDescendable.GetId())
	assert.NotNil(t, aDescendable.GetParentFolderId())
}
@ -1,181 +0,0 @@
package connector

import (
	"sync"
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/connector/support"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/selectors"
	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
)

// ---------------------------------------------------------------
// Disconnected Test Section
// ---------------------------------------------------------------
type DisconnectedGraphConnectorSuite struct {
	tester.Suite
}

func TestDisconnectedGraphSuite(t *testing.T) {
	s := &DisconnectedGraphConnectorSuite{
		Suite: tester.NewUnitSuite(t),
	}

	suite.Run(t, s)
}

func statusTestTask(
	t *testing.T,
	gc *GraphConnector,
	objects, success, folder int,
) {
	ctx, flush := tester.NewContext(t)
	defer flush()

	status := support.CreateStatus(
		ctx,
		support.Restore, folder,
		support.CollectionMetrics{
			Objects:   objects,
			Successes: success,
			Bytes:     0,
		},
		"statusTestTask")
	gc.UpdateStatus(status)
}

func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_Status() {
	t := suite.T()
	gc := GraphConnector{wg: &sync.WaitGroup{}}

	// Two tasks
	gc.incrementAwaitingMessages()
	gc.incrementAwaitingMessages()

	// Each helper task processes 4 objects across 1 folder, with 1 success
	// and the remaining 3 counted as failures.
	go statusTestTask(t, &gc, 4, 1, 1)
	go statusTestTask(t, &gc, 4, 1, 1)

	stats := gc.Wait()

	assert.NotEmpty(t, gc.PrintableStatus())
	// Expect 8 objects
	assert.Equal(t, 8, stats.Objects)
	// Expect 2 successes
	assert.Equal(t, 2, stats.Successes)
	// Expect 2 folders
	assert.Equal(t, 2, stats.Folders)
}

func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs_allServices() {
	sites := []string{"abc.site.foo", "bar.site.baz"}

	tests := []struct {
		name       string
		excludes   func(t *testing.T) selectors.Selector
		filters    func(t *testing.T) selectors.Selector
		includes   func(t *testing.T) selectors.Selector
		checkError assert.ErrorAssertionFunc
	}{
		{
			name:       "Valid User",
			checkError: assert.NoError,
			excludes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"})
				sel.Exclude(selTD.OneDriveBackupFolderScope(sel))
				sel.DiscreteOwner = "elliotReid@someHospital.org"
				return sel.Selector
			},
			filters: func(t *testing.T) selectors.Selector {
				sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"})
				sel.Filter(selTD.OneDriveBackupFolderScope(sel))
				sel.DiscreteOwner = "elliotReid@someHospital.org"
				return sel.Selector
			},
			includes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewOneDriveBackup([]string{"elliotReid@someHospital.org", "foo@SomeCompany.org"})
				sel.Include(selTD.OneDriveBackupFolderScope(sel))
				sel.DiscreteOwner = "elliotReid@someHospital.org"
				return sel.Selector
			},
		},
		{
			name:       "Invalid User",
			checkError: assert.NoError,
			excludes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"})
				sel.Exclude(selTD.OneDriveBackupFolderScope(sel))
				return sel.Selector
			},
			filters: func(t *testing.T) selectors.Selector {
				sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"})
				sel.Filter(selTD.OneDriveBackupFolderScope(sel))
				return sel.Selector
			},
			includes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewOneDriveBackup([]string{"foo@SomeCompany.org"})
				sel.Include(selTD.OneDriveBackupFolderScope(sel))
				return sel.Selector
			},
		},
		{
			name:       "valid sites",
			checkError: assert.NoError,
			excludes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewSharePointBackup([]string{"abc.site.foo", "bar.site.baz"})
				sel.DiscreteOwner = "abc.site.foo"
				sel.Exclude(sel.AllData())
				return sel.Selector
			},
			filters: func(t *testing.T) selectors.Selector {
				sel := selectors.NewSharePointBackup([]string{"abc.site.foo", "bar.site.baz"})
				sel.DiscreteOwner = "abc.site.foo"
				sel.Filter(sel.AllData())
				return sel.Selector
			},
			includes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewSharePointBackup([]string{"abc.site.foo", "bar.site.baz"})
				sel.DiscreteOwner = "abc.site.foo"
				sel.Include(sel.AllData())
				return sel.Selector
			},
		},
		{
			name:       "invalid sites",
			checkError: assert.Error,
			excludes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewSharePointBackup([]string{"fnords.smarfs.brawnhilda"})
				sel.Exclude(sel.AllData())
				return sel.Selector
			},
			filters: func(t *testing.T) selectors.Selector {
				sel := selectors.NewSharePointBackup([]string{"fnords.smarfs.brawnhilda"})
				sel.Filter(sel.AllData())
				return sel.Selector
			},
			includes: func(t *testing.T) selectors.Selector {
				sel := selectors.NewSharePointBackup([]string{"fnords.smarfs.brawnhilda"})
				sel.Include(sel.AllData())
				return sel.Selector
			},
		},
	}

	for _, test := range tests {
		suite.Run(test.name, func() {
			t := suite.T()

			err := verifyBackupInputs(test.excludes(t), sites)
			test.checkError(t, err, clues.ToCore(err))
			err = verifyBackupInputs(test.filters(t), sites)
			test.checkError(t, err, clues.ToCore(err))
			err = verifyBackupInputs(test.includes(t), sites)
			test.checkError(t, err, clues.ToCore(err))
		})
	}
}
@ -1,154 +0,0 @@
package onedrive

import (
	"net/http"
	"sync"
	"testing"
	"time"

	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type URLCacheIntegrationSuite struct {
	tester.Suite
	ac      api.Client
	user    string
	driveID string
}

func TestURLCacheIntegrationSuite(t *testing.T) {
	suite.Run(t, &URLCacheIntegrationSuite{
		Suite: tester.NewIntegrationSuite(
			t,
			[][]string{tester.M365AcctCredEnvs}),
	})
}

func (suite *URLCacheIntegrationSuite) SetupSuite() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	suite.user = tester.SecondaryM365UserID(t)

	acct := tester.NewM365Account(t)

	creds, err := acct.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	suite.ac, err = api.NewClient(creds)
	require.NoError(t, err, clues.ToCore(err))

	drive, err := suite.ac.Users().GetDefaultDrive(ctx, suite.user)
	require.NoError(t, err, clues.ToCore(err))

	suite.driveID = ptr.Val(drive.GetId())
}

// Basic test for urlCache. Create some files in onedrive, then access them via
// url cache
func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
	var (
		t              = suite.T()
		ac             = suite.ac.Drives()
		driveID        = suite.driveID
		newFolderName  = tester.DefaultTestRestoreDestination("folder").ContainerName
		driveItemPager = suite.ac.Drives().NewItemPager(driveID, "", api.DriveItemSelectDefault())
	)

	ctx, flush := tester.NewContext(t)
	defer flush()

	// Create a new test folder
	root, err := ac.GetRootFolder(ctx, driveID)
	require.NoError(t, err, clues.ToCore(err))

	// ac is already a Drives client, so call PostItemInContainer on it
	// directly rather than through another Drives() accessor.
	newFolder, err := ac.PostItemInContainer(
		ctx,
		driveID,
		ptr.Val(root.GetId()),
		newItem(newFolderName, true))
	require.NoError(t, err, clues.ToCore(err))
	require.NotNil(t, newFolder.GetId())

	nfid := ptr.Val(newFolder.GetId())

	// Create a bunch of files in the new folder
	var items []models.DriveItemable

	for i := 0; i < 10; i++ {
		newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)

		item, err := ac.PostItemInContainer(
			ctx,
			driveID,
			nfid,
			newItem(newItemName, false))
		if err != nil {
			// Something bad happened, skip this item
			continue
		}

		items = append(items, item)
	}

	// Create a new URL cache with a long TTL
	cache, err := newURLCache(
		suite.driveID,
		1*time.Hour,
		driveItemPager,
		fault.New(true))

	require.NoError(t, err, clues.ToCore(err))

	err = cache.refreshCache(ctx)
	require.NoError(t, err, clues.ToCore(err))

	// Launch parallel requests to the cache, one per item
	var wg sync.WaitGroup
	for i := 0; i < len(items); i++ {
		wg.Add(1)

		go func(i int) {
			defer wg.Done()

			// Read item from URL cache
			props, err := cache.getItemProperties(
				ctx,
				ptr.Val(items[i].GetId()))

			require.NoError(t, err, clues.ToCore(err))
			require.NotNil(t, props)
			require.NotEmpty(t, props.downloadURL)
			require.Equal(t, false, props.isDeleted)

			// Validate download URL
			c := graph.NewNoTimeoutHTTPWrapper()

			resp, err := c.Request(
				ctx,
				http.MethodGet,
				props.downloadURL,
				nil,
				nil)

			require.NoError(t, err, clues.ToCore(err))
			require.Equal(t, http.StatusOK, resp.StatusCode)
		}(i)
	}
	wg.Wait()

	// Validate that <= 1 delta queries were made
	require.LessOrEqual(t, cache.deltaQueryCount, 1)
}
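
The closing assertion is the cache's core contract: any number of concurrent readers should be served out of a single delta enumeration per refresh window. A minimal sketch of how a caller might lean on that contract; the wiring is hypothetical (driveID, pager, and itemID are assumed to be in scope), and newURLCache, refreshCache, and getItemProperties are the unexported helpers exercised above:

	// One cache shared by many readers; at most one delta query per TTL.
	cache, err := newURLCache(driveID, time.Hour, pager, fault.New(true))
	if err != nil {
		return err
	}

	if err := cache.refreshCache(ctx); err != nil {
		return err
	}

	props, err := cache.getItemProperties(ctx, itemID)
	if err != nil {
		return err
	}

	downloadURL := props.downloadURL // short-lived URL; re-resolved on refresh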
@ -1,28 +0,0 @@
package sharepoint

import (
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/backup/details"
)

// sharePointListInfo translates models.Listable metadata into searchable content
// List Details: https://learn.microsoft.com/en-us/graph/api/resources/list?view=graph-rest-1.0
func sharePointListInfo(lst models.Listable, size int64) *details.SharePointInfo {
	var (
		name     = ptr.Val(lst.GetDisplayName())
		webURL   = ptr.Val(lst.GetWebUrl())
		created  = ptr.Val(lst.GetCreatedDateTime())
		modified = ptr.Val(lst.GetLastModifiedDateTime())
	)

	return &details.SharePointInfo{
		ItemType: details.SharePointList,
		ItemName: name,
		Created:  created,
		Modified: modified,
		WebURL:   webURL,
		Size:     size,
	}
}
@ -1,59 +0,0 @@
package sharepoint

import (
	"testing"

	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/backup/details"
)

type SharePointInfoSuite struct {
	tester.Suite
}

func TestSharePointInfoSuite(t *testing.T) {
	suite.Run(t, &SharePointInfoSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *SharePointInfoSuite) TestSharePointInfo() {
	tests := []struct {
		name         string
		listAndDeets func() (models.Listable, *details.SharePointInfo)
	}{
		{
			name: "Empty List",
			listAndDeets: func() (models.Listable, *details.SharePointInfo) {
				i := &details.SharePointInfo{ItemType: details.SharePointList}
				return models.NewList(), i
			},
		}, {
			name: "Only Name",
			listAndDeets: func() (models.Listable, *details.SharePointInfo) {
				aTitle := "Whole List"
				listing := models.NewList()
				listing.SetDisplayName(&aTitle)
				i := &details.SharePointInfo{
					ItemType: details.SharePointList,
					ItemName: aTitle,
				}

				return listing, i
			},
		},
	}
	for _, test := range tests {
		suite.Run(test.name, func() {
			t := suite.T()

			list, expected := test.listAndDeets()
			info := sharePointListInfo(list, 10)
			assert.Equal(t, expected.ItemType, info.ItemType)
			assert.Equal(t, expected.ItemName, info.ItemName)
			assert.Equal(t, expected.WebURL, info.WebURL)
		})
	}
}
@ -52,7 +52,9 @@ const (
	Service   = "service"
	StartTime = "start_time"
	Status    = "status"
	RepoID    = "repo_id"

	// default values for keys
	RepoIDNotFound = "not_found"
)

const (

387
src/internal/kopia/backup_bases.go
Normal file
@ -0,0 +1,387 @@
package kopia

import (
	"context"

	"github.com/alcionai/clues"
	"github.com/kopia/kopia/repo/manifest"
	"golang.org/x/exp/slices"

	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/logger"
)

// TODO(ashmrtn): Move this into some inject package. Here to avoid import
// cycles.
type BackupBases interface {
	RemoveMergeBaseByManifestID(manifestID manifest.ID)
	Backups() []BackupEntry
	MinBackupVersion() int
	MergeBases() []ManifestEntry
	ClearMergeBases()
	AssistBases() []ManifestEntry
	ClearAssistBases()
	MergeBackupBases(
		ctx context.Context,
		other BackupBases,
		reasonToKey func(Reason) string,
	) BackupBases
}

type backupBases struct {
	// backups and mergeBases should be modified together as they relate similar
	// data.
	backups     []BackupEntry
	mergeBases  []ManifestEntry
	assistBases []ManifestEntry
}

func (bb *backupBases) RemoveMergeBaseByManifestID(manifestID manifest.ID) {
	idx := slices.IndexFunc(
		bb.mergeBases,
		func(man ManifestEntry) bool {
			return man.ID == manifestID
		})
	if idx >= 0 {
		bb.mergeBases = slices.Delete(bb.mergeBases, idx, idx+1)
	}

	// TODO(ashmrtn): This may not be strictly necessary but is at least easier to
	// reason about.
	idx = slices.IndexFunc(
		bb.assistBases,
		func(man ManifestEntry) bool {
			return man.ID == manifestID
		})
	if idx >= 0 {
		bb.assistBases = slices.Delete(bb.assistBases, idx, idx+1)
	}

	idx = slices.IndexFunc(
		bb.backups,
		func(bup BackupEntry) bool {
			return bup.SnapshotID == string(manifestID)
		})
	if idx >= 0 {
		bb.backups = slices.Delete(bb.backups, idx, idx+1)
	}
}

func (bb backupBases) Backups() []BackupEntry {
	return slices.Clone(bb.backups)
}

func (bb *backupBases) MinBackupVersion() int {
	min := version.NoBackup

	if bb == nil {
		return min
	}

	for _, bup := range bb.backups {
		if min == version.NoBackup || bup.Version < min {
			min = bup.Version
		}
	}

	return min
}

func (bb backupBases) MergeBases() []ManifestEntry {
	return slices.Clone(bb.mergeBases)
}

func (bb *backupBases) ClearMergeBases() {
	bb.mergeBases = nil
	bb.backups = nil
}

func (bb backupBases) AssistBases() []ManifestEntry {
	return slices.Clone(bb.assistBases)
}

func (bb *backupBases) ClearAssistBases() {
	bb.assistBases = nil
}

// MergeBackupBases reduces the two BackupBases into a single BackupBases.
// Assumes the passed in BackupBases represents a prior backup version (across
// some migration that disrupts lookup), and that the BackupBases used to call
// this function contains the current version.
//
// reasonToKey should be a function that, given a Reason, will produce some
// string that represents Reason in the context of the merge operation. For
// example, to merge BackupBases across a ResourceOwner migration, the Reason's
// service and category can be used as the key.
//
// Selection priority, for each reason key generated by reasonToKey, follows
// these rules:
//  1. If the receiver BackupBases has an entry for a given reason, ignore the
//     other BackupBases matching that reason.
//  2. If the receiver BackupBases has only AssistBases, look for a matching
//     MergeBase manifest in the passed in BackupBases.
//  3. If the receiver BackupBases has no entry for a reason, look for both
//     AssistBases and MergeBases in the passed in BackupBases.
func (bb *backupBases) MergeBackupBases(
	ctx context.Context,
	other BackupBases,
	reasonToKey func(reason Reason) string,
) BackupBases {
	if other == nil || (len(other.MergeBases()) == 0 && len(other.AssistBases()) == 0) {
		return bb
	}

	if bb == nil || (len(bb.MergeBases()) == 0 && len(bb.AssistBases()) == 0) {
		return other
	}

	toMerge := map[string]struct{}{}
	assist := map[string]struct{}{}

	// Track the bases in bb.
	for _, m := range bb.mergeBases {
		for _, r := range m.Reasons {
			k := reasonToKey(r)

			toMerge[k] = struct{}{}
			assist[k] = struct{}{}
		}
	}

	for _, m := range bb.assistBases {
		for _, r := range m.Reasons {
			k := reasonToKey(r)
			assist[k] = struct{}{}
		}
	}

	var toAdd []ManifestEntry

	// Calculate the set of mergeBases to pull from other into this one.
	for _, m := range other.MergeBases() {
		useReasons := []Reason{}

		for _, r := range m.Reasons {
			k := reasonToKey(r)
			if _, ok := toMerge[k]; ok {
				// Assume other contains prior manifest versions.
				// We don't want to stack a prior version incomplete onto
				// a current version's complete snapshot.
				continue
			}

			useReasons = append(useReasons, r)
		}

		if len(useReasons) > 0 {
			m.Reasons = useReasons
			toAdd = append(toAdd, m)
		}
	}

	res := &backupBases{
		backups:     bb.Backups(),
		mergeBases:  bb.MergeBases(),
		assistBases: bb.AssistBases(),
	}

	// Add new mergeBases and backups.
	for _, man := range toAdd {
		// Will get empty string if not found which is fine, it'll fail one of the
		// other checks.
		bID, _ := man.GetTag(TagBackupID)

		bup, ok := getBackupByID(other.Backups(), bID)
		if !ok {
			logger.Ctx(ctx).Infow(
				"not unioning snapshot missing backup",
				"other_manifest_id", man.ID,
				"other_backup_id", bID)

			continue
		}

		bup.Reasons = man.Reasons

		res.backups = append(res.backups, bup)
		res.mergeBases = append(res.mergeBases, man)
		res.assistBases = append(res.assistBases, man)
	}

	// Add assistBases from other to this one as needed.
	for _, m := range other.AssistBases() {
		useReasons := []Reason{}

		// Assume that all complete manifests in assist overlap with MergeBases.
		if len(m.IncompleteReason) == 0 {
			continue
		}

		for _, r := range m.Reasons {
			k := reasonToKey(r)
			if _, ok := assist[k]; ok {
				// This reason is already covered by either:
				// * complete manifest in bb
				// * incomplete manifest in bb
				//
				// If it was already in the assist set then it must be the case that
				// it's newer than any complete manifests in other for the same reason.
				continue
			}

			useReasons = append(useReasons, r)
		}

		if len(useReasons) > 0 {
			m.Reasons = useReasons
			res.assistBases = append(res.assistBases, m)
		}
	}

	return res
}
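
To make the keying concrete, here is a minimal sketch of a reasonToKey for the ResourceOwner-migration case the doc comment mentions. It assumes the package's Reason type with the ResourceOwner, Service, and Category fields used elsewhere in this file; the helper name and usage are illustrative, not part of this change:

	// serviceCatKey ignores Reason.ResourceOwner so bases recorded under the
	// old owner ID still line up with the same service/category pair under
	// the new one.
	func serviceCatKey(r Reason) string {
		return r.Service.String() + r.Category.String()
	}

	// Hypothetical usage, with bb holding current-version bases and oldBases
	// the pre-migration ones:
	//
	//	merged := bb.MergeBackupBases(ctx, oldBases, serviceCatKey)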

func findNonUniqueManifests(
	ctx context.Context,
	manifests []ManifestEntry,
) map[manifest.ID]struct{} {
	// ReasonKey -> manifests with that reason.
	reasons := map[string][]ManifestEntry{}
	toDrop := map[manifest.ID]struct{}{}

	for _, man := range manifests {
		// Incomplete snapshots are used only for kopia-assisted incrementals. The
		// fact that we need this check here makes it seem like this should live in
		// the kopia code. However, keeping it here allows for better debugging as
		// the kopia code only has access to a path builder which means it cannot
		// remove the resource owner from the error/log output. That is also below
		// the point where we decide if we should do a full backup or an incremental.
		if len(man.IncompleteReason) > 0 {
			logger.Ctx(ctx).Infow(
				"dropping incomplete manifest",
				"manifest_id", man.ID)

			toDrop[man.ID] = struct{}{}

			continue
		}

		for _, reason := range man.Reasons {
			reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String()
			reasons[reasonKey] = append(reasons[reasonKey], man)
		}
	}

	for reason, mans := range reasons {
		ictx := clues.Add(ctx, "reason", reason)

		if len(mans) == 0 {
			// Not sure how this would happen but just in case...
			continue
		} else if len(mans) > 1 {
			mIDs := make([]manifest.ID, 0, len(mans))
			for _, m := range mans {
				toDrop[m.ID] = struct{}{}
				mIDs = append(mIDs, m.ID)
			}

			// TODO(ashmrtn): We should actually just remove this reason from the
			// manifests and then if they have no reasons remaining drop them from the
			// set.
			logger.Ctx(ictx).Infow(
				"dropping manifests with duplicate reason",
				"manifest_ids", mIDs)

			continue
		}
	}

	return toDrop
}

func getBackupByID(backups []BackupEntry, bID string) (BackupEntry, bool) {
	if len(bID) == 0 {
		return BackupEntry{}, false
	}

	idx := slices.IndexFunc(backups, func(b BackupEntry) bool {
		return string(b.ID) == bID
	})

	if idx < 0 || idx >= len(backups) {
		return BackupEntry{}, false
	}

	return backups[idx], true
}

// fixupAndVerify goes through the set of backups and snapshots used for merging
// and ensures:
//   - the reasons for selecting merge snapshots are distinct
//   - all bases used for merging have a backup model with item and details
//     snapshot ID
//
// Backups that have overlapping reasons or that are not complete are removed
// from the set. Dropping these is safe because it only affects how much data we
// pull. On the other hand, *not* dropping them is unsafe as it will muck up
// merging when we add stuff to kopia (possibly multiple entries for the same
// item etc).
func (bb *backupBases) fixupAndVerify(ctx context.Context) {
	toDrop := findNonUniqueManifests(ctx, bb.mergeBases)

	var (
		backupsToKeep []BackupEntry
		mergeToKeep   []ManifestEntry
	)

	for _, man := range bb.mergeBases {
		if _, ok := toDrop[man.ID]; ok {
			continue
		}

		bID, _ := man.GetTag(TagBackupID)

		bup, ok := getBackupByID(bb.backups, bID)
		if !ok {
			toDrop[man.ID] = struct{}{}

			logger.Ctx(ctx).Info(
				"dropping manifest due to missing backup",
				"manifest_id", man.ID)

			continue
		}

		deetsID := bup.StreamStoreID
		if len(deetsID) == 0 {
			deetsID = bup.DetailsID
		}

		if len(bup.SnapshotID) == 0 || len(deetsID) == 0 {
			toDrop[man.ID] = struct{}{}

			logger.Ctx(ctx).Info(
				"dropping manifest due to invalid backup",
				"manifest_id", man.ID)

			continue
		}

		backupsToKeep = append(backupsToKeep, bup)
		mergeToKeep = append(mergeToKeep, man)
	}

	var assistToKeep []ManifestEntry

	for _, man := range bb.assistBases {
		if _, ok := toDrop[man.ID]; ok {
			continue
		}

		assistToKeep = append(assistToKeep, man)
	}

	bb.backups = backupsToKeep
	bb.mergeBases = mergeToKeep
	bb.assistBases = assistToKeep
}
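
As a concrete reading of the drop rules: two complete manifests that claim the same (resource owner, service, category) reason are both discarded, since neither can safely serve as a merge base. A hypothetical walkthrough, reusing the makeManifest helper from the test file that follows and assuming a ctx in scope:

	// Both manifests claim the same reason, so findNonUniqueManifests returns
	// both IDs, and fixupAndVerify would drop the pair entirely.
	r := Reason{ResourceOwner: "ro", Service: path.ExchangeService, Category: path.EmailCategory}
	m1 := makeManifest("snap1", "", "backup1", r)
	m2 := makeManifest("snap2", "", "backup2", r)

	toDrop := findNonUniqueManifests(ctx, []ManifestEntry{m1, m2})
	// len(toDrop) == 2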
705
src/internal/kopia/backup_bases_test.go
Normal file
@ -0,0 +1,705 @@
package kopia

import (
	"fmt"
	"testing"

	"github.com/kopia/kopia/repo/manifest"
	"github.com/kopia/kopia/snapshot"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/model"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/backup"
	"github.com/alcionai/corso/src/pkg/path"
)

func makeManifest(id, incmpl, bID string, reasons ...Reason) ManifestEntry {
	bIDKey, _ := makeTagKV(TagBackupID)

	return ManifestEntry{
		Manifest: &snapshot.Manifest{
			ID:               manifest.ID(id),
			IncompleteReason: incmpl,
			Tags:             map[string]string{bIDKey: bID},
		},
		Reasons: reasons,
	}
}

type BackupBasesUnitSuite struct {
	tester.Suite
}

func TestBackupBasesUnitSuite(t *testing.T) {
	suite.Run(t, &BackupBasesUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *BackupBasesUnitSuite) TestMinBackupVersion() {
	table := []struct {
		name            string
		bb              *backupBases
		expectedVersion int
	}{
		{
			name:            "Nil BackupBase",
			expectedVersion: version.NoBackup,
		},
		{
			name:            "No Backups",
			bb:              &backupBases{},
			expectedVersion: version.NoBackup,
		},
		{
			name: "Unsorted Backups",
			bb: &backupBases{
				backups: []BackupEntry{
					{
						Backup: &backup.Backup{
							Version: 4,
						},
					},
					{
						Backup: &backup.Backup{
							Version: 0,
						},
					},
					{
						Backup: &backup.Backup{
							Version: 2,
						},
					},
				},
			},
			expectedVersion: 0,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			assert.Equal(suite.T(), test.expectedVersion, test.bb.MinBackupVersion())
		})
	}
}

func (suite *BackupBasesUnitSuite) TestRemoveMergeBaseByManifestID() {
	backups := []BackupEntry{
		{Backup: &backup.Backup{SnapshotID: "1"}},
		{Backup: &backup.Backup{SnapshotID: "2"}},
		{Backup: &backup.Backup{SnapshotID: "3"}},
	}

	merges := []ManifestEntry{
		makeManifest("1", "", ""),
		makeManifest("2", "", ""),
		makeManifest("3", "", ""),
	}

	expected := &backupBases{
		backups:     []BackupEntry{backups[0], backups[1]},
		mergeBases:  []ManifestEntry{merges[0], merges[1]},
		assistBases: []ManifestEntry{merges[0], merges[1]},
	}

	delID := manifest.ID("3")

	table := []struct {
		name string
		// Below indices specify which items to add from the defined sets above.
		backup []int
		merge  []int
		assist []int
	}{
		{
			name:   "Not In Bases",
			backup: []int{0, 1},
			merge:  []int{0, 1},
			assist: []int{0, 1},
		},
		{
			name:   "Different Indexes",
			backup: []int{2, 0, 1},
			merge:  []int{0, 2, 1},
			assist: []int{0, 1, 2},
		},
		{
			name:   "First Item",
			backup: []int{2, 0, 1},
			merge:  []int{2, 0, 1},
			assist: []int{2, 0, 1},
		},
		{
			name:   "Middle Item",
			backup: []int{0, 2, 1},
			merge:  []int{0, 2, 1},
			assist: []int{0, 2, 1},
		},
		{
			name:   "Final Item",
			backup: []int{0, 1, 2},
			merge:  []int{0, 1, 2},
			assist: []int{0, 1, 2},
		},
		{
			name:   "Only In Backups",
			backup: []int{0, 1, 2},
			merge:  []int{0, 1},
			assist: []int{0, 1},
		},
		{
			name:   "Only In Merges",
			backup: []int{0, 1},
			merge:  []int{0, 1, 2},
			assist: []int{0, 1},
		},
		{
			name:   "Only In Assists",
			backup: []int{0, 1},
			merge:  []int{0, 1},
			assist: []int{0, 1, 2},
		},
	}

	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			bb := &backupBases{}

			for _, i := range test.backup {
				bb.backups = append(bb.backups, backups[i])
			}

			for _, i := range test.merge {
				bb.mergeBases = append(bb.mergeBases, merges[i])
			}

			for _, i := range test.assist {
				bb.assistBases = append(bb.assistBases, merges[i])
			}

			bb.RemoveMergeBaseByManifestID(delID)
			AssertBackupBasesEqual(t, expected, bb)
		})
	}
}

func (suite *BackupBasesUnitSuite) TestClearMergeBases() {
	bb := &backupBases{
		backups:    make([]BackupEntry, 2),
		mergeBases: make([]ManifestEntry, 2),
	}

	bb.ClearMergeBases()
	assert.Empty(suite.T(), bb.Backups())
	assert.Empty(suite.T(), bb.MergeBases())
}

func (suite *BackupBasesUnitSuite) TestClearAssistBases() {
	bb := &backupBases{assistBases: make([]ManifestEntry, 2)}

	bb.ClearAssistBases()
	assert.Empty(suite.T(), bb.AssistBases())
}
|
||||
|
||||
func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
	ro := "resource_owner"

	type testInput struct {
		id         int
		incomplete bool
		cat        []path.CategoryType
	}

	// Make a function so tests can modify things without messing with each other.
	makeBackupBases := func(ti []testInput) *backupBases {
		res := &backupBases{}

		for _, i := range ti {
			baseID := fmt.Sprintf("id%d", i.id)
			ir := ""

			if i.incomplete {
				ir = "checkpoint"
			}

			reasons := make([]Reason, 0, len(i.cat))

			for _, c := range i.cat {
				reasons = append(reasons, Reason{
					ResourceOwner: ro,
					Service:       path.ExchangeService,
					Category:      c,
				})
			}

			m := makeManifest(baseID, ir, "b"+baseID, reasons...)
			res.assistBases = append(res.assistBases, m)

			if i.incomplete {
				continue
			}

			b := BackupEntry{
				Backup: &backup.Backup{
					BaseModel:     model.BaseModel{ID: model.StableID("b" + baseID)},
					SnapshotID:    baseID,
					StreamStoreID: "ss" + baseID,
				},
				Reasons: reasons,
			}

			res.backups = append(res.backups, b)
			res.mergeBases = append(res.mergeBases, m)
		}

		return res
	}

	table := []struct {
		name   string
		bb     []testInput
		other  []testInput
		expect []testInput
	}{
		{
			name: "Other Empty",
			bb: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
			},
			expect: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
			},
		},
		{
			name: "BB Empty",
			other: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
			},
			expect: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
			},
		},
		{
			name: "Other overlaps Complete And Incomplete",
			bb: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
			},
			other: []testInput{
				{
					id:  2,
					cat: []path.CategoryType{path.EmailCategory},
				},
				{
					id:         3,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
			},
			expect: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
			},
		},
		{
			name: "Other Overlaps Complete",
			bb: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
			},
			other: []testInput{
				{
					id:  2,
					cat: []path.CategoryType{path.EmailCategory},
				},
			},
			expect: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
			},
		},
		{
			name: "Other Overlaps Incomplete",
			bb: []testInput{
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
			},
			other: []testInput{
				{
					id:  2,
					cat: []path.CategoryType{path.EmailCategory},
				},
				{
					id:         3,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
			},
			expect: []testInput{
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
				{
					id:  2,
					cat: []path.CategoryType{path.EmailCategory},
				},
			},
		},
		{
			name: "Other Disjoint",
			bb: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
			},
			other: []testInput{
				{
					id:  2,
					cat: []path.CategoryType{path.ContactsCategory},
				},
				{
					id:         3,
					cat:        []path.CategoryType{path.ContactsCategory},
					incomplete: true,
				},
			},
			expect: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
				{
					id:  2,
					cat: []path.CategoryType{path.ContactsCategory},
				},
				{
					id:         3,
					cat:        []path.CategoryType{path.ContactsCategory},
					incomplete: true,
				},
			},
		},
		{
			name: "Other Reduced Reasons",
			bb: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
			},
			other: []testInput{
				{
					id: 2,
					cat: []path.CategoryType{
						path.EmailCategory,
						path.ContactsCategory,
					},
				},
				{
					id: 3,
					cat: []path.CategoryType{
						path.EmailCategory,
						path.ContactsCategory,
					},
					incomplete: true,
				},
			},
			expect: []testInput{
				{cat: []path.CategoryType{path.EmailCategory}},
				{
					id:         1,
					cat:        []path.CategoryType{path.EmailCategory},
					incomplete: true,
				},
				{
					id:  2,
					cat: []path.CategoryType{path.ContactsCategory},
				},
				{
					id:         3,
					cat:        []path.CategoryType{path.ContactsCategory},
					incomplete: true,
				},
			},
		},
	}

	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			bb := makeBackupBases(test.bb)
			other := makeBackupBases(test.other)
			expect := makeBackupBases(test.expect)

			ctx, flush := tester.NewContext(t)
			defer flush()

			got := bb.MergeBackupBases(
				ctx,
				other,
				func(reason Reason) string {
					return reason.Service.String() + reason.Category.String()
				})
			AssertBackupBasesEqual(t, expect, got)
		})
	}
}

func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
	ro := "resource_owner"

	makeMan := func(pct path.CategoryType, id, incmpl, bID string) ManifestEntry {
		reason := Reason{
			ResourceOwner: ro,
			Service:       path.ExchangeService,
			Category:      pct,
		}

		return makeManifest(id, incmpl, bID, reason)
	}

	// Make a function so tests can modify things without messing with each other.
	validMail1 := func() *backupBases {
		return &backupBases{
			backups: []BackupEntry{
				{
					Backup: &backup.Backup{
						BaseModel: model.BaseModel{
							ID: "bid1",
						},
						SnapshotID:    "id1",
						StreamStoreID: "ssid1",
					},
				},
			},
			mergeBases: []ManifestEntry{
				makeMan(path.EmailCategory, "id1", "", "bid1"),
			},
			assistBases: []ManifestEntry{
				makeMan(path.EmailCategory, "id1", "", "bid1"),
			},
		}
	}

	table := []struct {
		name   string
		bb     *backupBases
		expect BackupBases
	}{
		{
			name: "empty BaseBackups",
			bb:   &backupBases{},
		},
		{
			name: "Merge Base Without Backup",
			bb: func() *backupBases {
				res := validMail1()
				res.backups = nil

				return res
			}(),
		},
		{
			name: "Backup Missing Snapshot ID",
			bb: func() *backupBases {
				res := validMail1()
				res.backups[0].SnapshotID = ""

				return res
			}(),
		},
		{
			name: "Backup Missing Deets ID",
			bb: func() *backupBases {
				res := validMail1()
				res.backups[0].StreamStoreID = ""

				return res
			}(),
		},
		{
			name: "Incomplete Snapshot",
			bb: func() *backupBases {
				res := validMail1()
				res.mergeBases[0].IncompleteReason = "ir"
				res.assistBases[0].IncompleteReason = "ir"

				return res
			}(),
		},
		{
			name: "Duplicate Reason",
			bb: func() *backupBases {
				res := validMail1()
				res.mergeBases[0].Reasons = append(
					res.mergeBases[0].Reasons,
					res.mergeBases[0].Reasons[0])
				res.assistBases = res.mergeBases

				return res
			}(),
		},
		{
			name:   "Single Valid Entry",
			bb:     validMail1(),
			expect: validMail1(),
		},
		{
			name: "Single Valid Entry With Incomplete Assist With Same Reason",
			bb: func() *backupBases {
				res := validMail1()
				res.assistBases = append(
					res.assistBases,
					makeMan(path.EmailCategory, "id2", "checkpoint", "bid2"))

				return res
			}(),
			expect: func() *backupBases {
				res := validMail1()
				res.assistBases = append(
					res.assistBases,
					makeMan(path.EmailCategory, "id2", "checkpoint", "bid2"))

				return res
			}(),
		},
		{
			name: "Single Valid Entry With Backup With Old Deets ID",
			bb: func() *backupBases {
				res := validMail1()
				res.backups[0].DetailsID = res.backups[0].StreamStoreID
				res.backups[0].StreamStoreID = ""

				return res
			}(),
			expect: func() *backupBases {
				res := validMail1()
				res.backups[0].DetailsID = res.backups[0].StreamStoreID
				res.backups[0].StreamStoreID = ""

				return res
			}(),
		},
		{
			name: "Single Valid Entry With Multiple Reasons",
			bb: func() *backupBases {
				res := validMail1()
				res.mergeBases[0].Reasons = append(
					res.mergeBases[0].Reasons,
					Reason{
						ResourceOwner: ro,
						Service:       path.ExchangeService,
						Category:      path.ContactsCategory,
					})
				res.assistBases = res.mergeBases

				return res
			}(),
			expect: func() *backupBases {
				res := validMail1()
				res.mergeBases[0].Reasons = append(
					res.mergeBases[0].Reasons,
					Reason{
						ResourceOwner: ro,
						Service:       path.ExchangeService,
						Category:      path.ContactsCategory,
					})
				res.assistBases = res.mergeBases

				return res
			}(),
		},
		{
			name: "Two Entries Overlapping Reasons",
			bb: func() *backupBases {
				res := validMail1()
				res.mergeBases = append(
					res.mergeBases,
					makeMan(path.EmailCategory, "id2", "", "bid2"))
				res.assistBases = res.mergeBases

				return res
			}(),
		},
		{
			name: "Three Entries One Invalid",
			bb: func() *backupBases {
				res := validMail1()
				res.backups = append(
					res.backups,
					BackupEntry{
						Backup: &backup.Backup{
							BaseModel: model.BaseModel{
								ID: "bid2",
							},
						},
					},
					BackupEntry{
						Backup: &backup.Backup{
							BaseModel: model.BaseModel{
								ID: "bid3",
							},
							SnapshotID:    "id3",
							StreamStoreID: "ssid3",
						},
					})
				res.mergeBases = append(
					res.mergeBases,
					makeMan(path.ContactsCategory, "id2", "checkpoint", "bid2"),
					makeMan(path.EventsCategory, "id3", "", "bid3"))
				res.assistBases = res.mergeBases

				return res
			}(),
			expect: func() *backupBases {
				res := validMail1()
				res.backups = append(
					res.backups,
					BackupEntry{
						Backup: &backup.Backup{
							BaseModel: model.BaseModel{
								ID: "bid3",
							},
							SnapshotID:    "id3",
							StreamStoreID: "ssid3",
						},
					})
				res.mergeBases = append(
					res.mergeBases,
					makeMan(path.EventsCategory, "id3", "", "bid3"))
				res.assistBases = res.mergeBases

				return res
			}(),
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			ctx, flush := tester.NewContext(suite.T())
			defer flush()

			test.bb.fixupAndVerify(ctx)
			AssertBackupBasesEqual(suite.T(), test.expect, test.bb)
		})
	}
}
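
The body of fixupAndVerify is not part of this diff, but the table above pins down its contract. Below is a minimal sketch of the per-entry check the test cases imply; it is an inference, not the actual implementation, and the cross-entry pruning of duplicate and overlapping Reasons is omitted.

	// Sketch only: names mirror the diff, logic is inferred from the test table.
	func isUsableMergeBase(b BackupEntry, m ManifestEntry) bool {
		return b.Backup != nil && // a merge base needs a backing backup model
			len(b.SnapshotID) > 0 && // with an item-data snapshot
			// either the current StreamStoreID or the legacy DetailsID must be set
			(len(b.StreamStoreID) > 0 || len(b.DetailsID) > 0) &&
			len(m.IncompleteReason) == 0 // incomplete snapshots never merge
	}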
@ -47,12 +47,6 @@ func (r Reason) Key() string {
	return r.ResourceOwner + r.Service.String() + r.Category.String()
}

type backupBases struct {
	backups     []BackupEntry
	mergeBases  []ManifestEntry
	assistBases []ManifestEntry
}

type BackupEntry struct {
	*backup.Backup
	Reasons []Reason
@ -198,6 +192,8 @@ func (b *baseFinder) findBasesInSet(
				Manifest: man,
				Reasons:  []Reason{reason},
			})

			logger.Ctx(ictx).Info("found incomplete backup")
		}

		continue
@ -209,6 +205,18 @@ func (b *baseFinder) findBasesInSet(
			// Safe to continue here as we'll just end up attempting to use an older
			// backup as the base.
			logger.CtxErr(ictx, err).Debug("searching for base backup")

			if !foundIncomplete {
				foundIncomplete = true

				kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
					Manifest: man,
					Reasons:  []Reason{reason},
				})

				logger.Ctx(ictx).Info("found incomplete backup")
			}

			continue
		}

@ -222,12 +230,27 @@ func (b *baseFinder) findBasesInSet(
				"empty backup stream store ID",
				"search_backup_id", bup.ID)

			if !foundIncomplete {
				foundIncomplete = true

				kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
					Manifest: man,
					Reasons:  []Reason{reason},
				})

				logger.Ctx(ictx).Infow(
					"found incomplete backup",
					"search_backup_id", bup.ID)
			}

			continue
		}

		// If we've made it to this point then we're considering the backup
		// complete as it has both an item data snapshot and a backup details
		// snapshot.
		logger.Ctx(ictx).Infow("found complete backup", "base_backup_id", bup.ID)

		me := ManifestEntry{
			Manifest: man,
			Reasons:  []Reason{reason},
@ -272,11 +295,11 @@ func (b *baseFinder) getBase(
	return b.findBasesInSet(ctx, reason, metas)
}

func (b *baseFinder) findBases(
func (b *baseFinder) FindBases(
	ctx context.Context,
	reasons []Reason,
	tags map[string]string,
) (backupBases, error) {
) BackupBases {
	var (
		// All maps go from ID -> entry. We need to track by ID so we can coalesce
		// the reason for selecting something. Kopia assisted snapshots also use
@ -340,24 +363,13 @@ func (b *baseFinder) findBases(
		}
	}

	return backupBases{
	res := &backupBases{
		backups:     maps.Values(baseBups),
		mergeBases:  maps.Values(baseSnaps),
		assistBases: maps.Values(kopiaAssistSnaps),
	}, nil
}

func (b *baseFinder) FindBases(
	ctx context.Context,
	reasons []Reason,
	tags map[string]string,
) ([]ManifestEntry, error) {
	bb, err := b.findBases(ctx, reasons, tags)
	if err != nil {
		return nil, clues.Stack(err)
	}

	// assistBases contains all snapshots so we can return it while maintaining
	// almost all compatibility.
	return bb.assistBases, nil
	res.fixupAndVerify(ctx)

	return res
}

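For orientation, here is a minimal sketch of how a caller consumes the refactored entry point, based only on the signatures in the hunks above; the logging line is illustrative rather than taken from the diff.

	// FindBases now returns a BackupBases directly instead of
	// ([]ManifestEntry, error); invalid entries are pruned via fixupAndVerify
	// before the value is returned.
	bb := bf.FindBases(ctx, reasons, nil)

	for _, mb := range bb.MergeBases() {
		logger.Ctx(ctx).Infow("usable merge base", "manifest_id", mb.ID)
	}
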
@ -5,11 +5,9 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
"github.com/kopia/kopia/snapshot"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
@ -111,13 +109,6 @@ func (sm mockEmptySnapshotManager) FindManifests(
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (sm mockEmptySnapshotManager) LoadSnapshots(
|
||||
context.Context,
|
||||
[]manifest.ID,
|
||||
) ([]*snapshot.Manifest, error) {
|
||||
return nil, clues.New("not implemented")
|
||||
}
|
||||
|
||||
func (sm mockEmptySnapshotManager) LoadSnapshot(
|
||||
context.Context,
|
||||
manifest.ID,
|
||||
@ -145,7 +136,7 @@ type manifestInfo struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func newManifestInfo2(
|
||||
func newManifestInfo(
|
||||
id manifest.ID,
|
||||
modTime time.Time,
|
||||
incomplete bool,
|
||||
@ -189,12 +180,12 @@ func newManifestInfo2(
|
||||
return res
|
||||
}
|
||||
|
||||
type mockSnapshotManager2 struct {
|
||||
type mockSnapshotManager struct {
|
||||
data []manifestInfo
|
||||
findErr error
|
||||
}
|
||||
|
||||
func matchesTags2(mi manifestInfo, tags map[string]string) bool {
|
||||
func matchesTags(mi manifestInfo, tags map[string]string) bool {
|
||||
for k := range tags {
|
||||
if _, ok := mi.tags[k]; !ok {
|
||||
return false
|
||||
@ -204,7 +195,7 @@ func matchesTags2(mi manifestInfo, tags map[string]string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (msm *mockSnapshotManager2) FindManifests(
|
||||
func (msm *mockSnapshotManager) FindManifests(
|
||||
ctx context.Context,
|
||||
tags map[string]string,
|
||||
) ([]*manifest.EntryMetadata, error) {
|
||||
@ -219,7 +210,7 @@ func (msm *mockSnapshotManager2) FindManifests(
|
||||
res := []*manifest.EntryMetadata{}
|
||||
|
||||
for _, mi := range msm.data {
|
||||
if matchesTags2(mi, tags) {
|
||||
if matchesTags(mi, tags) {
|
||||
res = append(res, mi.metadata)
|
||||
}
|
||||
}
|
||||
@ -227,14 +218,7 @@ func (msm *mockSnapshotManager2) FindManifests(
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (msm *mockSnapshotManager2) LoadSnapshots(
|
||||
ctx context.Context,
|
||||
ids []manifest.ID,
|
||||
) ([]*snapshot.Manifest, error) {
|
||||
return nil, clues.New("not implemented")
|
||||
}
|
||||
|
||||
func (msm *mockSnapshotManager2) LoadSnapshot(
|
||||
func (msm *mockSnapshotManager) LoadSnapshot(
|
||||
ctx context.Context,
|
||||
id manifest.ID,
|
||||
) (*snapshot.Manifest, error) {
|
||||
@ -244,6 +228,10 @@ func (msm *mockSnapshotManager2) LoadSnapshot(
|
||||
|
||||
for _, mi := range msm.data {
|
||||
if mi.man.ID == id {
|
||||
if mi.err != nil {
|
||||
return nil, mi.err
|
||||
}
|
||||
|
||||
return mi.man, nil
|
||||
}
|
||||
}
|
||||
@ -273,10 +261,12 @@ func newBackupModel(
|
||||
err: err,
|
||||
}
|
||||
|
||||
if !oldDetailsID {
|
||||
res.b.StreamStoreID = "ssid"
|
||||
} else {
|
||||
res.b.DetailsID = "ssid"
|
||||
if hasDetailsSnap {
|
||||
if !oldDetailsID {
|
||||
res.b.StreamStoreID = "ssid"
|
||||
} else {
|
||||
res.b.DetailsID = "ssid"
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
@ -340,10 +330,9 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() {
|
||||
},
|
||||
}
|
||||
|
||||
bb, err := bf.findBases(ctx, reasons, nil)
|
||||
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
|
||||
assert.Empty(t, bb.mergeBases)
|
||||
assert.Empty(t, bb.assistBases)
|
||||
bb := bf.FindBases(ctx, reasons, nil)
|
||||
assert.Empty(t, bb.MergeBases())
|
||||
assert.Empty(t, bb.AssistBases())
|
||||
}
|
||||
|
||||
func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
|
||||
@ -353,7 +342,7 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
|
||||
defer flush()
|
||||
|
||||
bf := baseFinder{
|
||||
sm: &mockSnapshotManager2{findErr: assert.AnError},
|
||||
sm: &mockSnapshotManager{findErr: assert.AnError},
|
||||
bg: mockEmptyModelGetter{},
|
||||
}
|
||||
reasons := []Reason{
|
||||
@ -364,10 +353,9 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
|
||||
},
|
||||
}
|
||||
|
||||
bb, err := bf.findBases(ctx, reasons, nil)
|
||||
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
|
||||
assert.Empty(t, bb.mergeBases)
|
||||
assert.Empty(t, bb.assistBases)
|
||||
bb := bf.FindBases(ctx, reasons, nil)
|
||||
assert.Empty(t, bb.MergeBases())
|
||||
assert.Empty(t, bb.AssistBases())
|
||||
}
|
||||
|
||||
func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
@ -387,7 +375,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
name: "Return Older Base If Fail To Get Manifest",
|
||||
input: testUser1Mail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testCompleteMan,
|
||||
@ -396,7 +384,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testMail,
|
||||
testUser1,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -407,21 +395,21 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
),
|
||||
},
|
||||
expectedBaseReasons: map[int][]Reason{
|
||||
0: testUser1Mail,
|
||||
1: testUser1Mail,
|
||||
},
|
||||
expectedAssistManifestReasons: map[int][]Reason{
|
||||
0: testUser1Mail,
|
||||
1: testUser1Mail,
|
||||
},
|
||||
backupData: []backupInfo{
|
||||
newBackupModel(testBackup2, true, true, false, nil),
|
||||
newBackupModel(testBackup1, false, false, false, assert.AnError),
|
||||
newBackupModel(testBackup1, true, true, false, nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Return Older Base If Fail To Get Backup",
|
||||
input: testUser1Mail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testCompleteMan,
|
||||
@ -430,7 +418,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testMail,
|
||||
testUser1,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -441,21 +429,22 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
),
|
||||
},
|
||||
expectedBaseReasons: map[int][]Reason{
|
||||
0: testUser1Mail,
|
||||
1: testUser1Mail,
|
||||
},
|
||||
expectedAssistManifestReasons: map[int][]Reason{
|
||||
0: testUser1Mail,
|
||||
1: testUser1Mail,
|
||||
},
|
||||
backupData: []backupInfo{
|
||||
newBackupModel(testBackup2, true, true, false, nil),
|
||||
newBackupModel(testBackup1, false, false, false, assert.AnError),
|
||||
newBackupModel(testBackup2, false, false, false, assert.AnError),
|
||||
newBackupModel(testBackup1, true, true, false, nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Return Older Base If Missing Details",
|
||||
input: testUser1Mail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testCompleteMan,
|
||||
@ -464,7 +453,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testMail,
|
||||
testUser1,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -475,21 +464,22 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
),
|
||||
},
|
||||
expectedBaseReasons: map[int][]Reason{
|
||||
0: testUser1Mail,
|
||||
1: testUser1Mail,
|
||||
},
|
||||
expectedAssistManifestReasons: map[int][]Reason{
|
||||
0: testUser1Mail,
|
||||
1: testUser1Mail,
|
||||
},
|
||||
backupData: []backupInfo{
|
||||
newBackupModel(testBackup2, true, true, false, nil),
|
||||
newBackupModel(testBackup1, true, false, false, nil),
|
||||
newBackupModel(testBackup2, true, false, false, nil),
|
||||
newBackupModel(testBackup1, true, true, false, nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Old Backup Details Pointer",
|
||||
input: testUser1Mail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -516,7 +506,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
name: "All One Snapshot",
|
||||
input: testAllUsersAllCats,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -543,7 +533,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
name: "Multiple Bases Some Overlapping Reasons",
|
||||
input: testAllUsersAllCats,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -555,7 +545,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testUser2,
|
||||
testUser3,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testCompleteMan,
|
||||
@ -648,7 +638,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
name: "Newer Incomplete Assist Snapshot",
|
||||
input: testUser1Mail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -657,7 +647,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testMail,
|
||||
testUser1,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testIncompleteMan,
|
||||
@ -684,7 +674,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
name: "Incomplete Older Than Complete",
|
||||
input: testUser1Mail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testIncompleteMan,
|
||||
@ -693,7 +683,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testMail,
|
||||
testUser1,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testCompleteMan,
|
||||
@ -719,7 +709,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
name: "Newest Incomplete Only Incomplete",
|
||||
input: testUser1Mail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testIncompleteMan,
|
||||
@ -728,7 +718,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testMail,
|
||||
testUser1,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testIncompleteMan,
|
||||
@ -752,7 +742,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
name: "Some Bases Not Found",
|
||||
input: testAllUsersMail,
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -778,7 +768,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
// Manifests are currently returned in the order they're defined by the
|
||||
// mock.
|
||||
manifestData: []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID2,
|
||||
testT2,
|
||||
testCompleteMan,
|
||||
@ -787,7 +777,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
testMail,
|
||||
testUser1,
|
||||
),
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -819,38 +809,37 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
||||
defer flush()
|
||||
|
||||
bf := baseFinder{
|
||||
sm: &mockSnapshotManager2{data: test.manifestData},
|
||||
sm: &mockSnapshotManager{data: test.manifestData},
|
||||
bg: &mockModelGetter{data: test.backupData},
|
||||
}
|
||||
|
||||
bb, err := bf.findBases(
|
||||
bb := bf.FindBases(
|
||||
ctx,
|
||||
test.input,
|
||||
nil)
|
||||
require.NoError(t, err, "getting bases: %v", clues.ToCore(err))
|
||||
|
||||
checkBackupEntriesMatch(
|
||||
t,
|
||||
bb.backups,
|
||||
bb.Backups(),
|
||||
test.backupData,
|
||||
test.expectedBaseReasons)
|
||||
checkManifestEntriesMatch(
|
||||
t,
|
||||
bb.mergeBases,
|
||||
bb.MergeBases(),
|
||||
test.manifestData,
|
||||
test.expectedBaseReasons)
|
||||
checkManifestEntriesMatch(
|
||||
t,
|
||||
bb.assistBases,
|
||||
bb.AssistBases(),
|
||||
test.manifestData,
|
||||
test.expectedAssistManifestReasons)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *BaseFinderUnitSuite) TestFetchPrevSnapshots_CustomTags() {
|
||||
func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {
|
||||
manifestData := []manifestInfo{
|
||||
newManifestInfo2(
|
||||
newManifestInfo(
|
||||
testID1,
|
||||
testT1,
|
||||
testCompleteMan,
|
||||
@ -914,19 +903,18 @@ func (suite *BaseFinderUnitSuite) TestFetchPrevSnapshots_CustomTags() {
|
||||
defer flush()
|
||||
|
||||
bf := baseFinder{
|
||||
sm: &mockSnapshotManager2{data: manifestData},
|
||||
sm: &mockSnapshotManager{data: manifestData},
|
||||
bg: &mockModelGetter{data: backupData},
|
||||
}
|
||||
|
||||
bb, err := bf.findBases(
|
||||
bb := bf.FindBases(
|
||||
ctx,
|
||||
testAllUsersAllCats,
|
||||
test.tags)
|
||||
require.NoError(t, err, "getting bases: %v", clues.ToCore(err))
|
||||
|
||||
checkManifestEntriesMatch(
|
||||
t,
|
||||
bb.mergeBases,
|
||||
bb.MergeBases(),
|
||||
manifestData,
|
||||
test.expectedIdxs)
|
||||
})
|
||||
|
||||
@ -42,7 +42,7 @@ func (kdc *kopiaDataCollection) Items(
	for _, item := range kdc.items {
		s, err := kdc.FetchItemByName(ctx, item)
		if err != nil {
			el.AddRecoverable(clues.Wrap(err, "fetching item").
			el.AddRecoverable(ctx, clues.Wrap(err, "fetching item").
				WithClues(ctx).
				Label(fault.LabelForceNoBackupCreation))

@ -13,8 +13,8 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
	"github.com/alcionai/corso/src/internal/data"
	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"

@ -39,6 +39,6 @@ type (
		ctx context.Context,
		reasons []kopia.Reason,
		tags map[string]string,
	) ([]kopia.ManifestEntry, error)
	) kopia.BackupBases
	}
)

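Only the changed return type appears in the hunk above. The surrounding interface presumably now reads as below; the BaseFinder and FindBases names are assumptions drawn from the kopia hunks earlier in this diff, since the name lines fall outside the hunk.

	type BaseFinder interface {
		FindBases(
			ctx context.Context,
			reasons []kopia.Reason,
			tags map[string]string,
		) kopia.BackupBases
	}
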
@ -12,8 +12,8 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/connector/exchange/mock"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/m365/exchange/mock"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"

63
src/internal/kopia/mock_backup_base.go
Normal file
@ -0,0 +1,63 @@
package kopia

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func AssertBackupBasesEqual(t *testing.T, expect, got BackupBases) {
	if expect == nil && got == nil {
		return
	}

	if expect == nil {
		assert.Empty(t, got.Backups(), "backups")
		assert.Empty(t, got.MergeBases(), "merge bases")
		assert.Empty(t, got.AssistBases(), "assist bases")

		return
	}

	if got == nil {
		if len(expect.Backups()) > 0 || len(expect.MergeBases()) > 0 || len(expect.AssistBases()) > 0 {
			assert.Fail(t, "got was nil but expected non-nil result %v", expect)
		}

		return
	}

	assert.ElementsMatch(t, expect.Backups(), got.Backups(), "backups")
	assert.ElementsMatch(t, expect.MergeBases(), got.MergeBases(), "merge bases")
	assert.ElementsMatch(t, expect.AssistBases(), got.AssistBases(), "assist bases")
}

func NewMockBackupBases() *MockBackupBases {
	return &MockBackupBases{backupBases: &backupBases{}}
}

type MockBackupBases struct {
	*backupBases
}

func (bb *MockBackupBases) WithBackups(b ...BackupEntry) *MockBackupBases {
	bb.backupBases.backups = append(bb.Backups(), b...)
	return bb
}

func (bb *MockBackupBases) WithMergeBases(m ...ManifestEntry) *MockBackupBases {
	bb.backupBases.mergeBases = append(bb.MergeBases(), m...)
	bb.backupBases.assistBases = append(bb.AssistBases(), m...)

	return bb
}

func (bb *MockBackupBases) WithAssistBases(m ...ManifestEntry) *MockBackupBases {
	bb.backupBases.assistBases = append(bb.AssistBases(), m...)
	return bb
}

func (bb *MockBackupBases) ClearMockAssistBases() *MockBackupBases {
	bb.backupBases.ClearAssistBases()
	return bb
}
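
A short usage sketch for the new mock, assuming fixtures m (a ManifestEntry) and b (a BackupEntry) like the ones built in the tests above. Note that WithMergeBases mirrors each merge base into the assist set, matching the superset relationship the production code maintains.

	// Hypothetical test snippet using the helpers defined above.
	bb := NewMockBackupBases().
		WithBackups(b).
		WithMergeBases(m).     // also appends m to assistBases
		ClearMockAssistBases() // drop assists when a test wants merge-only bases

	assert.Empty(t, bb.AssistBases())
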
@ -26,15 +26,16 @@ func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error)
	}

	opts := s3.Options{
		BucketName:     cfg.Bucket,
		Endpoint:       endpoint,
		Prefix:         cfg.Prefix,
		DoNotUseTLS:    cfg.DoNotUseTLS,
		DoNotVerifyTLS: cfg.DoNotVerifyTLS,
		Tags:           s.SessionTags,
		SessionName:    s.SessionName,
		RoleARN:        s.Role,
		RoleDuration:   s.SessionDuration,
		BucketName:          cfg.Bucket,
		Endpoint:            endpoint,
		Prefix:              cfg.Prefix,
		DoNotUseTLS:         cfg.DoNotUseTLS,
		DoNotVerifyTLS:      cfg.DoNotVerifyTLS,
		Tags:                s.SessionTags,
		SessionName:         s.SessionName,
		RoleARN:             s.Role,
		RoleDuration:        s.SessionDuration,
		TLSHandshakeTimeout: 60,
	}

	store, err := s3.New(ctx, &opts, false)

@ -22,10 +22,10 @@ import (
|
||||
"github.com/kopia/kopia/snapshot/snapshotfs"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph/metadata"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph/metadata"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
@ -133,6 +133,12 @@ type itemDetails struct {
|
||||
}
|
||||
|
||||
type corsoProgress struct {
|
||||
// this is an unwanted hack. We can't extend the kopia interface
|
||||
// funcs to pass through a context. This is the second best way to
|
||||
// get an at least partially formed context into funcs that need it
|
||||
// for logging and other purposes.
|
||||
ctx context.Context
|
||||
|
||||
snapshotfs.UploadProgress
|
||||
pending map[string]*itemDetails
|
||||
deets *details.Builder
|
||||
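The ctx field added above is what lets the fault-bus calls later in this file log against a real context; a representative call site, assembled from the hunks below:

	// kopia's UploadProgress callbacks can't accept a context parameter, so the
	// partially-formed cp.ctx captured at construction time is used instead.
	cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "kopia reported error").
		With("is_ignored", isIgnored, "relative_path", relpath).
		Label(fault.LabelForceNoBackupCreation))
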
@ -183,11 +189,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
|
||||
// never had to materialize their details in-memory.
|
||||
if d.info == nil {
|
||||
if d.prevPath == nil {
|
||||
cp.errs.AddRecoverable(clues.New("item sourced from previous backup with no previous path").
|
||||
cp.errs.AddRecoverable(cp.ctx, clues.New("item sourced from previous backup with no previous path").
|
||||
With(
|
||||
"service", d.repoPath.Service().String(),
|
||||
"category", d.repoPath.Category().String(),
|
||||
).
|
||||
"category", d.repoPath.Category().String()).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
@ -198,11 +203,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
|
||||
|
||||
err := cp.toMerge.addRepoRef(d.prevPath.ToBuilder(), d.repoPath, d.locationPath)
|
||||
if err != nil {
|
||||
cp.errs.AddRecoverable(clues.Wrap(err, "adding item to merge list").
|
||||
cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "adding item to merge list").
|
||||
With(
|
||||
"service", d.repoPath.Service().String(),
|
||||
"category", d.repoPath.Category().String(),
|
||||
).
|
||||
"category", d.repoPath.Category().String()).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
}
|
||||
|
||||
@ -215,11 +219,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
|
||||
!d.cached,
|
||||
*d.info)
|
||||
if err != nil {
|
||||
cp.errs.AddRecoverable(clues.New("adding item to details").
|
||||
cp.errs.AddRecoverable(cp.ctx, clues.New("adding item to details").
|
||||
With(
|
||||
"service", d.repoPath.Service().String(),
|
||||
"category", d.repoPath.Category().String(),
|
||||
).
|
||||
"category", d.repoPath.Category().String()).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
@ -278,7 +281,7 @@ func (cp *corsoProgress) Error(relpath string, err error, isIgnored bool) {
|
||||
|
||||
defer cp.UploadProgress.Error(relpath, err, isIgnored)
|
||||
|
||||
cp.errs.AddRecoverable(clues.Wrap(err, "kopia reported error").
|
||||
cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "kopia reported error").
|
||||
With("is_ignored", isIgnored, "relative_path", relpath).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
}
|
||||
@ -350,7 +353,7 @@ func collectionEntries(
|
||||
itemPath, err := streamedEnts.FullPath().AppendItem(e.UUID())
|
||||
if err != nil {
|
||||
err = clues.Wrap(err, "getting full item path")
|
||||
progress.errs.AddRecoverable(err)
|
||||
progress.errs.AddRecoverable(ctx, err)
|
||||
|
||||
logger.CtxErr(ctx, err).Error("getting full item path")
|
||||
|
||||
|
||||
@ -20,8 +20,8 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
|
||||
exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
@ -472,8 +472,12 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
bd := &details.Builder{}
|
||||
cp := corsoProgress{
|
||||
ctx: ctx,
|
||||
UploadProgress: &snapshotfs.NullUploadProgress{},
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
@ -526,6 +530,10 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
|
||||
|
||||
func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
bd := &details.Builder{}
|
||||
cachedItems := map[string]testInfo{
|
||||
suite.targetFileName: {
|
||||
@ -535,6 +543,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
|
||||
},
|
||||
}
|
||||
cp := corsoProgress{
|
||||
ctx: ctx,
|
||||
UploadProgress: &snapshotfs.NullUploadProgress{},
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
@ -565,6 +574,9 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
|
||||
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
prevPath := makePath(
|
||||
suite.T(),
|
||||
[]string{testTenant, service, testUser, category, testInboxDir, testFileName2},
|
||||
@ -582,6 +594,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
|
||||
// Setup stuff.
|
||||
db := &details.Builder{}
|
||||
cp := corsoProgress{
|
||||
ctx: ctx,
|
||||
UploadProgress: &snapshotfs.NullUploadProgress{},
|
||||
deets: db,
|
||||
pending: map[string]*itemDetails{},
|
||||
@ -617,8 +630,12 @@ func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
bd := &details.Builder{}
|
||||
cp := corsoProgress{
|
||||
ctx: ctx,
|
||||
UploadProgress: &snapshotfs.NullUploadProgress{},
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
@ -682,6 +699,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
|
||||
}
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -801,6 +819,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -908,6 +927,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
}
|
||||
@ -1004,6 +1024,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -1298,6 +1319,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -2221,6 +2243,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -2375,6 +2398,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
|
||||
)
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -2477,6 +2501,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase()
|
||||
)
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -2733,6 +2758,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
|
||||
)
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
@ -2901,6 +2927,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
|
||||
)
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: newMergeDetails(),
|
||||
errs: fault.New(true),
|
||||
|
||||
@ -160,6 +160,7 @@ func (w Wrapper) ConsumeBackupCollections(
|
||||
}
|
||||
|
||||
progress := &corsoProgress{
|
||||
ctx: ctx,
|
||||
pending: map[string]*itemDetails{},
|
||||
deets: &details.Builder{},
|
||||
toMerge: newMergeDetails(),
|
||||
@ -415,7 +416,7 @@ func loadDirsAndItems(
|
||||
|
||||
dir, err := getDir(ictx, dirItems.dir, snapshotRoot)
|
||||
if err != nil {
|
||||
el.AddRecoverable(clues.Wrap(err, "loading storage directory").
|
||||
el.AddRecoverable(ctx, clues.Wrap(err, "loading storage directory").
|
||||
WithClues(ictx).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
@ -431,7 +432,7 @@ func loadDirsAndItems(
|
||||
}
|
||||
|
||||
if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {
|
||||
el.AddRecoverable(clues.Wrap(err, "adding collection to merge collection").
|
||||
el.AddRecoverable(ctx, clues.Wrap(err, "adding collection to merge collection").
|
||||
WithClues(ctx).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
@ -493,7 +494,7 @@ func (w Wrapper) ProduceRestoreCollections(
|
||||
|
||||
parentStoragePath, err := itemPaths.StoragePath.Dir()
|
||||
if err != nil {
|
||||
el.AddRecoverable(clues.Wrap(err, "getting storage directory path").
|
||||
el.AddRecoverable(ictx, clues.Wrap(err, "getting storage directory path").
|
||||
WithClues(ictx).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
|
||||
@ -19,10 +19,10 @@ import (
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
|
||||
exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
|
||||
"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/data/mock"
|
||||
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control/repository"
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
package connector
|
||||
package m365
|
||||
|
||||
import (
|
||||
"context"
|
||||
@ -8,15 +8,13 @@ import (
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/idname"
|
||||
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
|
||||
"github.com/alcionai/corso/src/internal/connector/discovery"
|
||||
"github.com/alcionai/corso/src/internal/connector/exchange"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/connector/onedrive"
|
||||
"github.com/alcionai/corso/src/internal/connector/sharepoint"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/internal/m365/discovery"
|
||||
"github.com/alcionai/corso/src/internal/m365/exchange"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/onedrive"
|
||||
"github.com/alcionai/corso/src/internal/m365/sharepoint"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/filters"
|
||||
@ -34,7 +32,7 @@ import (
|
||||
// The metadata field can include things like delta tokens or the previous backup's
|
||||
// folder hierarchy. The absence of metadata causes the collection creation to ignore
|
||||
// prior history (ie, incrementals) and run a full backup.
|
||||
func (gc *GraphConnector) ProduceBackupCollections(
|
||||
func (ctrl *Controller) ProduceBackupCollections(
|
||||
ctx context.Context,
|
||||
owner idname.Provider,
|
||||
sels selectors.Selector,
|
||||
@ -45,7 +43,7 @@ func (gc *GraphConnector) ProduceBackupCollections(
|
||||
) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
|
||||
ctx, end := diagnostics.Span(
|
||||
ctx,
|
||||
"gc:produceBackupCollections",
|
||||
"m365:produceBackupCollections",
|
||||
diagnostics.Index("service", sels.Service.String()))
|
||||
defer end()
|
||||
|
||||
@ -55,14 +53,14 @@ func (gc *GraphConnector) ProduceBackupCollections(
|
||||
ctrlOpts.Parallelism.ItemFetch = graph.Parallelism(sels.PathService()).
|
||||
ItemOverride(ctx, ctrlOpts.Parallelism.ItemFetch)
|
||||
|
||||
err := verifyBackupInputs(sels, gc.IDNameLookup.IDs())
|
||||
err := verifyBackupInputs(sels, ctrl.IDNameLookup.IDs())
|
||||
if err != nil {
|
||||
return nil, nil, false, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
serviceEnabled, canMakeDeltaQueries, err := checkServiceEnabled(
|
||||
ctx,
|
||||
gc.AC.Users(),
|
||||
ctrl.AC.Users(),
|
||||
path.ServiceType(sels.Service),
|
||||
sels.DiscreteOwner)
|
||||
if err != nil {
|
||||
@ -87,14 +85,14 @@ func (gc *GraphConnector) ProduceBackupCollections(
|
||||
|
||||
switch sels.Service {
|
||||
case selectors.ServiceExchange:
|
||||
colls, ssmb, canUsePreviousBackup, err = exchange.DataCollections(
|
||||
colls, ssmb, canUsePreviousBackup, err = exchange.ProduceBackupCollections(
|
||||
ctx,
|
||||
gc.AC,
|
||||
ctrl.AC,
|
||||
sels,
|
||||
gc.credentials.AzureTenantID,
|
||||
ctrl.credentials.AzureTenantID,
|
||||
owner,
|
||||
metadata,
|
||||
gc.UpdateStatus,
|
||||
ctrl.UpdateStatus,
|
||||
ctrlOpts,
|
||||
errs)
|
||||
if err != nil {
|
||||
@ -102,15 +100,15 @@ func (gc *GraphConnector) ProduceBackupCollections(
|
||||
}
|
||||
|
||||
case selectors.ServiceOneDrive:
|
||||
colls, ssmb, canUsePreviousBackup, err = onedrive.DataCollections(
|
||||
colls, ssmb, canUsePreviousBackup, err = onedrive.ProduceBackupCollections(
|
||||
ctx,
|
||||
gc.AC,
|
||||
ctrl.AC,
|
||||
sels,
|
||||
owner,
|
||||
metadata,
|
||||
lastBackupVersion,
|
||||
gc.credentials.AzureTenantID,
|
||||
gc.UpdateStatus,
|
||||
ctrl.credentials.AzureTenantID,
|
||||
ctrl.UpdateStatus,
|
||||
ctrlOpts,
|
||||
errs)
|
||||
if err != nil {
|
||||
@ -118,14 +116,14 @@ func (gc *GraphConnector) ProduceBackupCollections(
|
||||
}
|
||||
|
||||
case selectors.ServiceSharePoint:
|
||||
colls, ssmb, canUsePreviousBackup, err = sharepoint.DataCollections(
|
||||
colls, ssmb, canUsePreviousBackup, err = sharepoint.ProduceBackupCollections(
|
||||
ctx,
|
||||
gc.AC,
|
||||
ctrl.AC,
|
||||
sels,
|
||||
owner,
|
||||
metadata,
|
||||
gc.credentials,
|
||||
gc,
|
||||
ctrl.credentials,
|
||||
ctrl,
|
||||
ctrlOpts,
|
||||
errs)
|
||||
if err != nil {
|
||||
@ -144,7 +142,7 @@ func (gc *GraphConnector) ProduceBackupCollections(
|
||||
// break the process state, putting us into deadlock or
|
||||
// panics.
|
||||
if c.State() != data.DeletedState {
|
||||
gc.incrementAwaitingMessages()
|
||||
ctrl.incrementAwaitingMessages()
|
||||
}
|
||||
}
|
||||
|
||||
@ -154,7 +152,7 @@ func (gc *GraphConnector) ProduceBackupCollections(
|
||||
// IsBackupRunnable verifies that the users provided has the services enabled and
|
||||
// data can be backed up. The canMakeDeltaQueries provides info if the mailbox is
|
||||
// full and delta queries can be made on it.
|
||||
func (gc *GraphConnector) IsBackupRunnable(
|
||||
func (ctrl *Controller) IsBackupRunnable(
|
||||
ctx context.Context,
|
||||
service path.ServiceType,
|
||||
resourceOwner string,
|
||||
@ -164,7 +162,7 @@ func (gc *GraphConnector) IsBackupRunnable(
|
||||
return true, nil
|
||||
}
|
||||
|
||||
info, err := gc.AC.Users().GetInfo(ctx, resourceOwner)
|
||||
info, err := ctrl.AC.Users().GetInfo(ctx, resourceOwner)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -225,59 +223,3 @@ func checkServiceEnabled(
|
||||
|
||||
return true, canMakeDeltaQueries, nil
|
||||
}
|
||||
|
||||
// ConsumeRestoreCollections restores data from the specified collections
|
||||
// into M365 using the GraphAPI.
|
||||
// SideEffect: gc.status is updated at the completion of operation
|
||||
func (gc *GraphConnector) ConsumeRestoreCollections(
|
||||
ctx context.Context,
|
||||
backupVersion int,
|
||||
sels selectors.Selector,
|
||||
dest control.RestoreDestination,
|
||||
opts control.Options,
|
||||
dcs []data.RestoreCollection,
|
||||
errs *fault.Bus,
|
||||
) (*details.Details, error) {
|
||||
ctx, end := diagnostics.Span(ctx, "connector:restore")
|
||||
defer end()
|
||||
|
||||
ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
|
||||
|
||||
var (
|
||||
status *support.ConnectorOperationStatus
|
||||
deets = &details.Builder{}
|
||||
err error
|
||||
)
|
||||
|
||||
switch sels.Service {
|
||||
case selectors.ServiceExchange:
|
||||
status, err = exchange.RestoreCollections(ctx, gc.AC, dest, dcs, deets, errs)
|
||||
case selectors.ServiceOneDrive:
|
||||
status, err = onedrive.RestoreCollections(
|
||||
ctx,
|
||||
onedrive.NewRestoreHandler(gc.AC),
|
||||
backupVersion,
|
||||
dest,
|
||||
opts,
|
||||
dcs,
|
||||
deets,
|
||||
errs)
|
||||
case selectors.ServiceSharePoint:
|
||||
status, err = sharepoint.RestoreCollections(
|
||||
ctx,
|
||||
backupVersion,
|
||||
gc.AC,
|
||||
dest,
|
||||
opts,
|
||||
dcs,
|
||||
deets,
|
||||
errs)
|
||||
default:
|
||||
err = clues.Wrap(clues.New(sels.Service.String()), "service not supported")
|
||||
}
|
||||
|
||||
gc.incrementAwaitingMessages()
|
||||
gc.UpdateStatus(status)
|
||||
|
||||
return deets.Details(), err
|
||||
}
|
||||
@ -1,4 +1,4 @@
|
||||
package connector
|
||||
package m365
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -11,8 +11,9 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
|
||||
"github.com/alcionai/corso/src/internal/connector/exchange"
|
||||
"github.com/alcionai/corso/src/internal/connector/sharepoint"
|
||||
"github.com/alcionai/corso/src/internal/m365/exchange"
|
||||
"github.com/alcionai/corso/src/internal/m365/resource"
|
||||
"github.com/alcionai/corso/src/internal/m365/sharepoint"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/version"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
@ -59,19 +60,13 @@ func (suite *DataCollectionIntgSuite) SetupSuite() {
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
}
|
||||
|
||||
// TestExchangeDataCollection verifies interface between operation and
|
||||
// GraphConnector remains stable to receive a non-zero amount of Collections
|
||||
// for the Exchange Package. Enabled exchange applications:
|
||||
// - mail
|
||||
// - contacts
|
||||
// - events
|
||||
func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
|
||||
ctx, flush := tester.NewContext(suite.T())
|
||||
defer flush()
|
||||
|
||||
selUsers := []string{suite.user}
|
||||
|
||||
connector := loadConnector(ctx, suite.T(), Users)
|
||||
ctrl := newController(ctx, suite.T(), resource.Users)
|
||||
tests := []struct {
|
||||
name string
|
||||
getSelector func(t *testing.T) selectors.Selector
|
||||
@ -127,14 +122,14 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
|
||||
ctrlOpts := control.Defaults()
|
||||
ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries
|
||||
|
||||
collections, excludes, canUsePreviousBackup, err := exchange.DataCollections(
|
||||
collections, excludes, canUsePreviousBackup, err := exchange.ProduceBackupCollections(
|
||||
ctx,
|
||||
suite.ac,
|
||||
sel,
|
||||
suite.tenantID,
|
||||
uidn,
|
||||
nil,
|
||||
connector.UpdateStatus,
|
||||
ctrl.UpdateStatus,
|
				ctrlOpts,
				fault.New(true))
			require.NoError(t, err, clues.ToCore(err))
@ -142,7 +137,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
			assert.True(t, excludes.Empty())

			for range collections {
				connector.incrementAwaitingMessages()
				ctrl.incrementAwaitingMessages()
			}

			// Categories with delta endpoints will produce a collection for metadata
@ -158,7 +153,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
				}
			}

			status := connector.Wait()
			status := ctrl.Wait()
			assert.NotZero(t, status.Successes)
			t.Log(status.String())
		})
@ -172,8 +167,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
	defer flush()

	owners := []string{"snuffleupagus"}

	connector := loadConnector(ctx, suite.T(), Users)
	ctrl := newController(ctx, suite.T(), resource.Users)
	tests := []struct {
		name        string
		getSelector func(t *testing.T) selectors.Selector
@ -238,7 +232,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
			ctx, flush := tester.NewContext(t)
			defer flush()

			collections, excludes, canUsePreviousBackup, err := connector.ProduceBackupCollections(
			collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
				ctx,
				test.getSelector(t),
				test.getSelector(t),
@ -254,16 +248,12 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
		}
	}

// TestSharePointDataCollection verifies interface between operation and
// GraphConnector remains stable to receive a non-zero amount of Collections
// for the SharePoint Package.
func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
	ctx, flush := tester.NewContext(suite.T())
	defer flush()

	selSites := []string{suite.site}

	connector := loadConnector(ctx, suite.T(), Sites)
	ctrl := newController(ctx, suite.T(), resource.Sites)
	tests := []struct {
		name     string
		expected int
@ -297,14 +287,14 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {

			sel := test.getSelector()

			collections, excludes, canUsePreviousBackup, err := sharepoint.DataCollections(
			collections, excludes, canUsePreviousBackup, err := sharepoint.ProduceBackupCollections(
				ctx,
				suite.ac,
				sel,
				sel,
				nil,
				connector.credentials,
				connector,
				ctrl.credentials,
				ctrl,
				control.Defaults(),
				fault.New(true))
			require.NoError(t, err, clues.ToCore(err))
@ -313,7 +303,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
			assert.True(t, excludes.Empty())

			for range collections {
				connector.incrementAwaitingMessages()
				ctrl.incrementAwaitingMessages()
			}

			// we don't know an exact count of drives this will produce,
@ -328,7 +318,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
				}
			}

			status := connector.Wait()
			status := ctrl.Wait()
			assert.NotZero(t, status.Successes)
			t.Log(status.String())
		})
@ -341,7 +331,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {

type SPCollectionIntgSuite struct {
	tester.Suite
	connector *GraphConnector
	connector *Controller
	user      string
}

@ -358,7 +348,7 @@ func (suite *SPCollectionIntgSuite) SetupSuite() {
	ctx, flush := tester.NewContext(suite.T())
	defer flush()

	suite.connector = loadConnector(ctx, suite.T(), Sites)
	suite.connector = newController(ctx, suite.T(), resource.Sites)
	suite.user = tester.M365UserID(suite.T())

	tester.LogTimeOfTest(suite.T())
@ -372,11 +362,11 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {

	var (
		siteID  = tester.M365SiteID(t)
		gc      = loadConnector(ctx, t, Sites)
		ctrl    = newController(ctx, t, resource.Sites)
		siteIDs = []string{siteID}
	)

	id, name, err := gc.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil)
	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil)
	require.NoError(t, err, clues.ToCore(err))

	sel := selectors.NewSharePointBackup(siteIDs)
@ -384,7 +374,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {

	sel.SetDiscreteOwnerIDName(id, name)

	cols, excludes, canUsePreviousBackup, err := gc.ProduceBackupCollections(
	cols, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
		ctx,
		inMock.NewProvider(id, name),
		sel.Selector,
@ -419,11 +409,11 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {

	var (
		siteID  = tester.M365SiteID(t)
		gc      = loadConnector(ctx, t, Sites)
		ctrl    = newController(ctx, t, resource.Sites)
		siteIDs = []string{siteID}
	)

	id, name, err := gc.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil)
	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil)
	require.NoError(t, err, clues.ToCore(err))

	sel := selectors.NewSharePointBackup(siteIDs)
@ -431,7 +421,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {

	sel.SetDiscreteOwnerIDName(id, name)

	cols, excludes, canUsePreviousBackup, err := gc.ProduceBackupCollections(
	cols, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
		ctx,
		inMock.NewProvider(id, name),
		sel.Selector,
@ -1,6 +1,4 @@
// Package connector uploads and retrieves data from M365 through
// the msgraph-go-sdk.
package connector
package m365

import (
	"context"
@ -10,28 +8,25 @@ import (
	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/connector/support"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/m365/resource"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// ---------------------------------------------------------------------------
// Graph Connector
// ---------------------------------------------------------------------------

// must comply with BackupProducer and RestoreConsumer
var (
	_ inject.BackupProducer  = &GraphConnector{}
	_ inject.RestoreConsumer = &GraphConnector{}
	_ inject.BackupProducer  = &Controller{}
	_ inject.RestoreConsumer = &Controller{}
)

// GraphConnector is a struct used to wrap the GraphServiceClient and
// Controller is a struct used to wrap the GraphServiceClient and
// GraphRequestAdapter from the msgraph-sdk-go. Additional fields are for
// bookkeeping and interfacing with other component.
type GraphConnector struct {
type Controller struct {
	AC api.Client

	tenant string
@ -43,20 +38,20 @@ type GraphConnector struct {
	// reference for processes that choose to populate the values.
	IDNameLookup idname.Cacher

	// wg is used to track completion of GC tasks
	// wg is used to track completion of tasks
	wg     *sync.WaitGroup
	region *trace.Region

	// mutex used to synchronize updates to `status`
	mu     sync.Mutex
	status support.ConnectorOperationStatus // contains the status of the last run status
	status support.ControllerOperationStatus // contains the status of the last run status
}

func NewGraphConnector(
func NewController(
	ctx context.Context,
	acct account.Account,
	r Resource,
) (*GraphConnector, error) {
	rc resource.Category,
) (*Controller, error) {
	creds, err := acct.M365Config()
	if err != nil {
		return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx)
@ -67,106 +62,97 @@ func NewGraphConnector(
		return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
	}

	rc, err := r.resourceClient(ac)
	rCli, err := getResourceClient(rc, ac)
	if err != nil {
		return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
	}

	gc := GraphConnector{
	ctrl := Controller{
		AC:           ac,
		IDNameLookup: idname.NewCache(nil),

		credentials: creds,
		ownerLookup: rc,
		ownerLookup: rCli,
		tenant:      acct.ID(),
		wg:          &sync.WaitGroup{},
	}

	return &gc, nil
	return &ctrl, nil
}
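
// Editorial sketch (not part of this commit): minimal usage of the renamed
// constructor, assuming an initialized account.Account named acct; the owner
// address is illustrative only.
//
//	ctrl, err := NewController(ctx, acct, resource.Users)
//	if err != nil {
//		return err
//	}
//	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, "user@contoso.example", nil)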

// ---------------------------------------------------------------------------
// Processing Status
// ---------------------------------------------------------------------------

// AwaitStatus waits for all gc tasks to complete and then returns status
func (gc *GraphConnector) Wait() *data.CollectionStats {
// AwaitStatus waits for all tasks to complete and then returns status
func (ctrl *Controller) Wait() *data.CollectionStats {
	defer func() {
		if gc.region != nil {
			gc.region.End()
			gc.region = nil
		if ctrl.region != nil {
			ctrl.region.End()
			ctrl.region = nil
		}
	}()
	gc.wg.Wait()
	ctrl.wg.Wait()

	// clean up and reset statefulness
	dcs := data.CollectionStats{
		Folders:   gc.status.Folders,
		Objects:   gc.status.Metrics.Objects,
		Successes: gc.status.Metrics.Successes,
		Bytes:     gc.status.Metrics.Bytes,
		Details:   gc.status.String(),
		Folders:   ctrl.status.Folders,
		Objects:   ctrl.status.Metrics.Objects,
		Successes: ctrl.status.Metrics.Successes,
		Bytes:     ctrl.status.Metrics.Bytes,
		Details:   ctrl.status.String(),
	}

	gc.wg = &sync.WaitGroup{}
	gc.status = support.ConnectorOperationStatus{}
	ctrl.wg = &sync.WaitGroup{}
	ctrl.status = support.ControllerOperationStatus{}

	return &dcs
}

// UpdateStatus is used by gc initiated tasks to indicate completion
func (gc *GraphConnector) UpdateStatus(status *support.ConnectorOperationStatus) {
	defer gc.wg.Done()
// UpdateStatus is used by initiated tasks to indicate completion
func (ctrl *Controller) UpdateStatus(status *support.ControllerOperationStatus) {
	defer ctrl.wg.Done()

	if status == nil {
		return
	}

	gc.mu.Lock()
	defer gc.mu.Unlock()
	gc.status = support.MergeStatus(gc.status, *status)
	ctrl.mu.Lock()
	defer ctrl.mu.Unlock()
	ctrl.status = support.MergeStatus(ctrl.status, *status)
}

// Status returns the current status of the graphConnector operation.
func (gc *GraphConnector) Status() support.ConnectorOperationStatus {
	return gc.status
// Status returns the current status of the controller process.
func (ctrl *Controller) Status() support.ControllerOperationStatus {
	return ctrl.status
}

// PrintableStatus returns a string formatted version of the GC status.
func (gc *GraphConnector) PrintableStatus() string {
	return gc.status.String()
// PrintableStatus returns a string formatted version of the status.
func (ctrl *Controller) PrintableStatus() string {
	return ctrl.status.String()
}

func (gc *GraphConnector) incrementAwaitingMessages() {
	gc.wg.Add(1)
func (ctrl *Controller) incrementAwaitingMessages() {
	ctrl.wg.Add(1)
}

// ---------------------------------------------------------------------------
// Resource Lookup Handling
// ---------------------------------------------------------------------------

type Resource int

const (
	UnknownResource Resource = iota
	AllResources // unused
	Users
	Sites
)

func (r Resource) resourceClient(ac api.Client) (*resourceClient, error) {
	switch r {
	case Users:
		return &resourceClient{enum: r, getter: ac.Users()}, nil
	case Sites:
		return &resourceClient{enum: r, getter: ac.Sites()}, nil
func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, error) {
	switch rc {
	case resource.Users:
		return &resourceClient{enum: rc, getter: ac.Users()}, nil
	case resource.Sites:
		return &resourceClient{enum: rc, getter: ac.Sites()}, nil
	default:
		return nil, clues.New("unrecognized owner resource enum").With("resource_enum", r)
		return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc)
	}
}
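
// Editorial sketch (not in the commit): the factory keys off the new
// resource.Category enum, so call sites choose the owner-lookup behavior up
// front, e.g.:
//
//	rCli, err := getResourceClient(resource.Sites, ac) // site-based lookups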

type resourceClient struct {
	enum   Resource
	enum   resource.Category
	getter getIDAndNamer
}

@ -243,18 +229,18 @@ func (r resourceClient) getOwnerIDAndNameFrom(
// The id-name swapper is optional. Some processes will look up all owners in
// the tenant before reaching this step. In that case, the data gets handed
// down for this func to consume instead of performing further queries. The
// data gets stored inside the gc instance for later re-use.
func (gc *GraphConnector) PopulateOwnerIDAndNamesFrom(
// data gets stored inside the controller instance for later re-use.
func (ctrl *Controller) PopulateOwnerIDAndNamesFrom(
	ctx context.Context,
	owner string, // input value, can be either id or name
	ins idname.Cacher,
) (string, string, error) {
	id, name, err := gc.ownerLookup.getOwnerIDAndNameFrom(ctx, gc.AC, owner, ins)
	id, name, err := ctrl.ownerLookup.getOwnerIDAndNameFrom(ctx, ctrl.AC, owner, ins)
	if err != nil {
		return "", "", clues.Wrap(err, "identifying resource owner")
	}

	gc.IDNameLookup = idname.NewCache(map[string]string{id: name})
	ctrl.IDNameLookup = idname.NewCache(map[string]string{id: name})

	return id, name, nil
}
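
// Editorial sketch (not in the commit): when a caller has already enumerated
// owners, the prefetched cache is handed down through `ins` and no further
// Graph query is performed; names here are illustrative:
//
//	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, "owner-id-or-name", prefetchedCache)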
File diff suppressed because it is too large
@ -6,7 +6,7 @@ import (
	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
@ -9,7 +9,7 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/connector/discovery"
	"github.com/alcionai/corso/src/internal/m365/discovery"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/credentials"
@ -2,14 +2,18 @@ package exchange

import (
	"context"
	"encoding/json"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/common/pii"
	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/connector/support"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/observe"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/logger"
@ -18,7 +22,303 @@ import (
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// filterContainersAndFillCollections is a utility function
// MetadataFileNames produces the category-specific set of filenames used to
// store graph metadata such as delta tokens and folderID->path references.
func MetadataFileNames(cat path.CategoryType) []string {
	switch cat {
	case path.EmailCategory, path.ContactsCategory:
		return []string{graph.DeltaURLsFileName, graph.PreviousPathFileName}
	default:
		return []string{graph.PreviousPathFileName}
	}
}
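
// Editorial sketch (not in the commit): expected outputs, given the graph
// package's filename constants used above:
//
//	MetadataFileNames(path.EmailCategory)  // delta urls file + previous path file
//	MetadataFileNames(path.EventsCategory) // previous path file only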

type CatDeltaPaths map[path.CategoryType]DeltaPaths

type DeltaPaths map[string]DeltaPath

func (dps DeltaPaths) AddDelta(k, d string) {
	dp, ok := dps[k]
	if !ok {
		dp = DeltaPath{}
	}

	dp.Delta = d
	dps[k] = dp
}

func (dps DeltaPaths) AddPath(k, p string) {
	dp, ok := dps[k]
	if !ok {
		dp = DeltaPath{}
	}

	dp.Path = p
	dps[k] = dp
}

type DeltaPath struct {
	Delta string
	Path  string
}
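
// Editorial sketch (not in the commit): AddDelta and AddPath upsert the same
// keyed entry, so a container accumulates both halves across metadata files;
// the key and values below are illustrative:
//
//	dps := DeltaPaths{}
//	dps.AddDelta("folder-id", "delta-token")
//	dps.AddPath("folder-id", "tenant/user/email/folder")
//	// dps["folder-id"] == DeltaPath{Delta: "delta-token", Path: "tenant/user/email/folder"}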

// ParseMetadataCollections produces a map of structs holding delta
// and path lookup maps.
func parseMetadataCollections(
	ctx context.Context,
	colls []data.RestoreCollection,
) (CatDeltaPaths, bool, error) {
	// cdp stores metadata
	cdp := CatDeltaPaths{
		path.ContactsCategory: {},
		path.EmailCategory:    {},
		path.EventsCategory:   {},
	}

	// found tracks the metadata we've loaded, to make sure we don't
	// fetch overlapping copies.
	found := map[path.CategoryType]map[string]struct{}{
		path.ContactsCategory: {},
		path.EmailCategory:    {},
		path.EventsCategory:   {},
	}

	// errors from metadata items should not stop the backup,
	// but it should prevent us from using previous backups
	errs := fault.New(true)

	for _, coll := range colls {
		var (
			breakLoop bool
			items     = coll.Items(ctx, errs)
			category  = coll.FullPath().Category()
		)

		for {
			select {
			case <-ctx.Done():
				return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)

			case item, ok := <-items:
				if !ok || errs.Failure() != nil {
					breakLoop = true
					break
				}

				var (
					m    = map[string]string{}
					cdps = cdp[category]
				)

				err := json.NewDecoder(item.ToReader()).Decode(&m)
				if err != nil {
					return nil, false, clues.New("decoding metadata json").WithClues(ctx)
				}

				switch item.UUID() {
				case graph.PreviousPathFileName:
					if _, ok := found[category]["path"]; ok {
						return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
					}

					for k, p := range m {
						cdps.AddPath(k, p)
					}

					found[category]["path"] = struct{}{}

				case graph.DeltaURLsFileName:
					if _, ok := found[category]["delta"]; ok {
						return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
					}

					for k, d := range m {
						cdps.AddDelta(k, d)
					}

					found[category]["delta"] = struct{}{}
				}

				cdp[category] = cdps
			}

			if breakLoop {
				break
			}
		}
	}

	if errs.Failure() != nil {
		logger.CtxErr(ctx, errs.Failure()).Info("reading metadata collection items")

		return CatDeltaPaths{
			path.ContactsCategory: {},
			path.EmailCategory:    {},
			path.EventsCategory:   {},
		}, false, nil
	}

	// Remove any entries that contain a path or a delta, but not both.
	// That metadata is considered incomplete, and needs to incur a
	// complete backup on the next run.
	for _, dps := range cdp {
		for k, dp := range dps {
			if len(dp.Path) == 0 {
				delete(dps, k)
			}
		}
	}

	return cdp, true, nil
}
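
// Editorial sketch (not in the commit): each metadata item decodes into a
// flat containerID->value map; a hypothetical delta-token file body:
//
//	{"folder-id-1": "delta-token-1", "folder-id-2": "delta-token-2"}
//
// One such map per file is merged into CatDeltaPaths above, and entries
// lacking a Path are dropped, forcing a full enumeration for that container.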

// ProduceBackupCollections returns a DataCollection which the caller can
// use to read mailbox data out for the specified user
func ProduceBackupCollections(
	ctx context.Context,
	ac api.Client,
	selector selectors.Selector,
	tenantID string,
	user idname.Provider,
	metadata []data.RestoreCollection,
	su support.StatusUpdater,
	ctrlOpts control.Options,
	errs *fault.Bus,
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
	eb, err := selector.ToExchangeBackup()
	if err != nil {
		return nil, nil, false, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
	}

	var (
		collections = []data.BackupCollection{}
		el          = errs.Local()
		categories  = map[path.CategoryType]struct{}{}
		handlers    = BackupHandlers(ac)
	)

	// Turn on concurrency limiter middleware for exchange backups
	// unless explicitly disabled through DisableConcurrencyLimiterFN cli flag
	if !ctrlOpts.ToggleFeatures.DisableConcurrencyLimiter {
		graph.InitializeConcurrencyLimiter(ctrlOpts.Parallelism.ItemFetch)
	}

	cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, metadata)
	if err != nil {
		return nil, nil, false, err
	}

	for _, scope := range eb.Scopes() {
		if el.Failure() != nil {
			break
		}

		dcs, err := createCollections(
			ctx,
			handlers,
			tenantID,
			user,
			scope,
			cdps[scope.Category().PathType()],
			ctrlOpts,
			su,
			errs)
		if err != nil {
			el.AddRecoverable(ctx, err)
			continue
		}

		categories[scope.Category().PathType()] = struct{}{}

		collections = append(collections, dcs...)
	}

	if len(collections) > 0 {
		baseCols, err := graph.BaseCollections(
			ctx,
			collections,
			tenantID,
			user.ID(),
			path.ExchangeService,
			categories,
			su,
			errs)
		if err != nil {
			return nil, nil, false, err
		}

		collections = append(collections, baseCols...)
	}

	return collections, nil, canUsePreviousBackup, el.Failure()
}
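
// Editorial sketch (not in the commit): a minimal caller, assuming an
// api.Client, a user-scoped selector, and prior-backup metadata collections;
// all names are illustrative:
//
//	colls, _, canUsePrev, err := ProduceBackupCollections(
//		ctx, ac, sel, tenantID, owner, prevMetadata,
//		ctrl.UpdateStatus, control.Defaults(), fault.New(false))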

// createCollections - utility function that retrieves M365
// IDs through Microsoft Graph API. The selectors.ExchangeScope
// determines the type of collections that are retrieved.
func createCollections(
	ctx context.Context,
	handlers map[path.CategoryType]backupHandler,
	tenantID string,
	user idname.Provider,
	scope selectors.ExchangeScope,
	dps DeltaPaths,
	ctrlOpts control.Options,
	su support.StatusUpdater,
	errs *fault.Bus,
) ([]data.BackupCollection, error) {
	ctx = clues.Add(ctx, "category", scope.Category().PathType())

	var (
		allCollections = make([]data.BackupCollection, 0)
		category       = scope.Category().PathType()
		qp             = graph.QueryParams{
			Category:      category,
			ResourceOwner: user,
			TenantID:      tenantID,
		}
	)

	handler, ok := handlers[category]
	if !ok {
		return nil, clues.New("unsupported backup category type").WithClues(ctx)
	}

	foldersComplete := observe.MessageWithCompletion(
		ctx,
		observe.Bulletf("%s", qp.Category))
	defer close(foldersComplete)

	rootFolder, cc := handler.NewContainerCache(user.ID())

	if err := cc.Populate(ctx, errs, rootFolder); err != nil {
		return nil, clues.Wrap(err, "populating container cache")
	}

	collections, err := populateCollections(
		ctx,
		qp,
		handler,
		su,
		cc,
		scope,
		dps,
		ctrlOpts,
		errs)
	if err != nil {
		return nil, clues.Wrap(err, "filling collections")
	}

	foldersComplete <- struct{}{}

	for _, coll := range collections {
		allCollections = append(allCollections, coll)
	}

	return allCollections, nil
}
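
// Editorial note (not in the commit): the per-scope pipeline is
// NewContainerCache -> ContainerResolver.Populate (folder hierarchy) ->
// populateCollections (item IDs per folder); failures before item
// enumeration abort the scope with a wrapped error rather than a
// recoverable fault.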

// populateCollections is a utility function
// that places the M365 object ids belonging to specific directories
// into a BackupCollection. Messages outside of those directories are omitted.
// @param collection is filled with during this function.
@ -27,7 +327,7 @@ import (
// TODO(ashmrtn): This should really return []data.BackupCollection but
// unfortunately some of our tests rely on being able to lookup returned
// collections by ID and it would be non-trivial to change them.
func filterContainersAndFillCollections(
func populateCollections(
	ctx context.Context,
	qp graph.QueryParams,
	bh backupHandler,
@ -104,7 +404,7 @@ func filterContainersAndFillCollections(
			!ctrlOpts.ToggleFeatures.DisableDelta)
		if err != nil {
			if !graph.IsErrDeletedInFlight(err) {
				el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
				el.AddRecoverable(ctx, clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
				continue
			}

@ -167,7 +467,7 @@ func filterContainersAndFillCollections(
		)

		if collections[id] != nil {
			el.AddRecoverable(clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx))
			el.AddRecoverable(ctx, clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx))
			continue
		}

@ -1,7 +1,9 @@
package exchange

import (
	"bytes"
	"context"
	"sync"
	"testing"

	"github.com/alcionai/clues"
@ -11,9 +13,9 @@ import (

	inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/connector/support"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
@ -107,12 +109,12 @@ func (m mockResolver) Items() []graph.CachedContainer {
	return m.items
}

func (m mockResolver) AddToCache(ctx context.Context, gc graph.Container) error {
func (m mockResolver) AddToCache(ctx context.Context, ctrl graph.Container) error {
	if len(m.added) == 0 {
		m.added = map[string]string{}
	}

	m.added[ptr.Val(gc.GetDisplayName())] = ptr.Val(gc.GetId())
	m.added[ptr.Val(ctrl.GetDisplayName())] = ptr.Val(ctrl.GetId())

	return nil
}
@ -125,33 +127,765 @@ func (m mockResolver) LocationInCache(string) (string, bool)
func (m mockResolver) Populate(context.Context, *fault.Bus, string, ...string) error { return nil }

// ---------------------------------------------------------------------------
// tests
// Unit tests
// ---------------------------------------------------------------------------

type ServiceIteratorsSuite struct {
type DataCollectionsUnitSuite struct {
	tester.Suite
}

func TestDataCollectionsUnitSuite(t *testing.T) {
	suite.Run(t, &DataCollectionsUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
	type fileValues struct {
		fileName string
		value    string
	}

	table := []struct {
		name                 string
		data                 []fileValues
		expect               map[string]DeltaPath
		canUsePreviousBackup bool
		expectError          assert.ErrorAssertionFunc
	}{
		{
			name: "delta urls only",
			data: []fileValues{
				{graph.DeltaURLsFileName, "delta-link"},
			},
			expect:               map[string]DeltaPath{},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "multiple delta urls",
			data: []fileValues{
				{graph.DeltaURLsFileName, "delta-link"},
				{graph.DeltaURLsFileName, "delta-link-2"},
			},
			canUsePreviousBackup: false,
			expectError:          assert.Error,
		},
		{
			name: "previous path only",
			data: []fileValues{
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "delta-link",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "multiple previous paths",
			data: []fileValues{
				{graph.PreviousPathFileName, "prev-path"},
				{graph.PreviousPathFileName, "prev-path-2"},
			},
			canUsePreviousBackup: false,
			expectError:          assert.Error,
		},
		{
			name: "delta urls and previous paths",
			data: []fileValues{
				{graph.DeltaURLsFileName, "delta-link"},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "delta-link",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "delta urls and empty previous paths",
			data: []fileValues{
				{graph.DeltaURLsFileName, "delta-link"},
				{graph.PreviousPathFileName, ""},
			},
			expect:               map[string]DeltaPath{},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "empty delta urls and previous paths",
			data: []fileValues{
				{graph.DeltaURLsFileName, ""},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "delta-link",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "delta urls with special chars",
			data: []fileValues{
				{graph.DeltaURLsFileName, "`!@#$%^&*()_[]{}/\"\\"},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "`!@#$%^&*()_[]{}/\"\\",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "delta urls with escaped chars",
			data: []fileValues{
				{graph.DeltaURLsFileName, `\n\r\t\b\f\v\0\\`},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "\\n\\r\\t\\b\\f\\v\\0\\\\",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
		{
			name: "delta urls with newline char runes",
			data: []fileValues{
				// rune(92) = \, rune(110) = n. Ensuring it's not possible to
				// error in serializing/deserializing and produce a single newline
				// character from those two runes.
				{graph.DeltaURLsFileName, string([]rune{rune(92), rune(110)})},
				{graph.PreviousPathFileName, "prev-path"},
			},
			expect: map[string]DeltaPath{
				"key": {
					Delta: "\\n",
					Path:  "prev-path",
				},
			},
			canUsePreviousBackup: true,
			expectError:          assert.NoError,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			entries := []graph.MetadataCollectionEntry{}

			for _, d := range test.data {
				entries = append(
					entries,
					graph.NewMetadataEntry(d.fileName, map[string]string{"key": d.value}))
			}

			coll, err := graph.MakeMetadataCollection(
				"t", "u",
				path.ExchangeService,
				path.EmailCategory,
				entries,
				func(cos *support.ControllerOperationStatus) {},
			)
			require.NoError(t, err, clues.ToCore(err))

			cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
				data.NoFetchRestoreCollection{Collection: coll},
			})
			test.expectError(t, err, clues.ToCore(err))

			assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")

			emails := cdps[path.EmailCategory]

			assert.Len(t, emails, len(test.expect))

			for k, v := range emails {
				assert.Equal(t, v.Delta, emails[k].Delta, "delta")
				assert.Equal(t, v.Path, emails[k].Path, "path")
			}
		})
	}
}

type failingColl struct {
	t *testing.T
}

func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
	ic := make(chan data.Stream)
	defer close(ic)

	errs.AddRecoverable(ctx, assert.AnError)

	return ic
}

func (f failingColl) FullPath() path.Path {
	tmp, err := path.Build(
		"tenant",
		"user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"inbox")
	require.NoError(f.t, err, clues.ToCore(err))

	return tmp
}

func (f failingColl) FetchItemByName(context.Context, string) (data.Stream, error) {
	// no fetch calls will be made
	return nil, nil
}

// This check is to ensure that we don't error out, but still return
// canUsePreviousBackup as false on read errors
func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections_ReadFailure() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	fc := failingColl{t}

	_, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{fc})
	require.NoError(t, err)
	require.False(t, canUsePreviousBackup)
}

// ---------------------------------------------------------------------------
// Integration tests
// ---------------------------------------------------------------------------

func newStatusUpdater(t *testing.T, wg *sync.WaitGroup) func(status *support.ControllerOperationStatus) {
	updater := func(status *support.ControllerOperationStatus) {
		defer wg.Done()
	}

	return updater
}
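
// Editorial sketch (not in the commit): the updater pairs with a WaitGroup
// sized to the collection count, mirroring the Controller's own bookkeeping;
// names follow the tests below:
//
//	var wg sync.WaitGroup
//	su := newStatusUpdater(t, &wg)
//	colls, err := createCollections(ctx, handlers, tenantID, owner, scope,
//		DeltaPaths{}, control.Defaults(), su, fault.New(true))
//	wg.Add(len(colls))
//	wg.Wait()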
|
||||
|
||||
type DataCollectionsIntegrationSuite struct {
|
||||
tester.Suite
|
||||
user string
|
||||
site string
|
||||
tenantID string
|
||||
ac api.Client
|
||||
}
|
||||
|
||||
func TestDataCollectionsIntegrationSuite(t *testing.T) {
|
||||
suite.Run(t, &DataCollectionsIntegrationSuite{
|
||||
Suite: tester.NewIntegrationSuite(
|
||||
t,
|
||||
[][]string{tester.M365AcctCredEnvs},
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *DataCollectionsIntegrationSuite) SetupSuite() {
|
||||
suite.user = tester.M365UserID(suite.T())
|
||||
suite.site = tester.M365SiteID(suite.T())
|
||||
|
||||
acct := tester.NewM365Account(suite.T())
|
||||
creds, err := acct.M365Config()
|
||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
||||
|
||||
suite.ac, err = api.NewClient(creds)
|
||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
||||
|
||||
suite.tenantID = creds.AzureTenantID
|
||||
|
||||
tester.LogTimeOfTest(suite.T())
|
||||
}
|
||||
|
||||
func (suite *DataCollectionsIntegrationSuite) TestMailFetch() {
|
||||
var (
|
||||
userID = tester.M365UserID(suite.T())
|
||||
users = []string{userID}
|
||||
handlers = BackupHandlers(suite.ac)
|
||||
)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
scope selectors.ExchangeScope
|
||||
folderNames map[string]struct{}
|
||||
canMakeDeltaQueries bool
|
||||
}{
|
||||
{
|
||||
name: "Folder Iterative Check Mail",
|
||||
scope: selectors.NewExchangeBackup(users).MailFolders(
|
||||
[]string{DefaultMailFolder},
|
||||
selectors.PrefixMatch(),
|
||||
)[0],
|
||||
folderNames: map[string]struct{}{
|
||||
DefaultMailFolder: {},
|
||||
},
|
||||
canMakeDeltaQueries: true,
|
||||
},
|
||||
{
|
||||
name: "Folder Iterative Check Mail Non-Delta",
|
||||
scope: selectors.NewExchangeBackup(users).MailFolders(
|
||||
[]string{DefaultMailFolder},
|
||||
selectors.PrefixMatch(),
|
||||
)[0],
|
||||
folderNames: map[string]struct{}{
|
||||
DefaultMailFolder: {},
|
||||
},
|
||||
canMakeDeltaQueries: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
ctrlOpts := control.Defaults()
|
||||
ctrlOpts.ToggleFeatures.DisableDelta = !test.canMakeDeltaQueries
|
||||
|
||||
collections, err := createCollections(
|
||||
ctx,
|
||||
handlers,
|
||||
suite.tenantID,
|
||||
inMock.NewProvider(userID, userID),
|
||||
test.scope,
|
||||
DeltaPaths{},
|
||||
ctrlOpts,
|
||||
func(status *support.ControllerOperationStatus) {},
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
for _, c := range collections {
|
||||
if c.FullPath().Service() == path.ExchangeMetadataService {
|
||||
continue
|
||||
}
|
||||
|
||||
require.NotEmpty(t, c.FullPath().Folder(false))
|
||||
|
||||
// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
|
||||
// interface.
|
||||
if !assert.Implements(t, (*data.LocationPather)(nil), c) {
|
||||
continue
|
||||
}
|
||||
|
||||
loc := c.(data.LocationPather).LocationPath().String()
|
||||
|
||||
require.NotEmpty(t, loc)
|
||||
|
||||
delete(test.folderNames, loc)
|
||||
}
|
||||
|
||||
assert.Empty(t, test.folderNames)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *DataCollectionsIntegrationSuite) TestDelta() {
|
||||
var (
|
||||
userID = tester.M365UserID(suite.T())
|
||||
users = []string{userID}
|
||||
handlers = BackupHandlers(suite.ac)
|
||||
)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
scope selectors.ExchangeScope
|
||||
}{
|
||||
{
|
||||
name: "Mail",
|
||||
scope: selectors.NewExchangeBackup(users).MailFolders(
|
||||
[]string{DefaultMailFolder},
|
||||
selectors.PrefixMatch(),
|
||||
)[0],
|
||||
},
|
||||
{
|
||||
name: "Contacts",
|
||||
scope: selectors.NewExchangeBackup(users).ContactFolders(
|
||||
[]string{DefaultContactFolder},
|
||||
selectors.PrefixMatch(),
|
||||
)[0],
|
||||
},
|
||||
{
|
||||
name: "Events",
|
||||
scope: selectors.NewExchangeBackup(users).EventCalendars(
|
||||
[]string{DefaultCalendar},
|
||||
selectors.PrefixMatch(),
|
||||
)[0],
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
// get collections without providing any delta history (ie: full backup)
|
||||
collections, err := createCollections(
|
||||
ctx,
|
||||
handlers,
|
||||
suite.tenantID,
|
||||
inMock.NewProvider(userID, userID),
|
||||
test.scope,
|
||||
DeltaPaths{},
|
||||
control.Defaults(),
|
||||
func(status *support.ControllerOperationStatus) {},
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
assert.Less(t, 1, len(collections), "retrieved metadata and data collections")
|
||||
|
||||
var metadata data.BackupCollection
|
||||
|
||||
for _, coll := range collections {
|
||||
if coll.FullPath().Service() == path.ExchangeMetadataService {
|
||||
metadata = coll
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, metadata, "collections contains a metadata collection")
|
||||
|
||||
cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
|
||||
data.NoFetchRestoreCollection{Collection: metadata},
|
||||
})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
assert.True(t, canUsePreviousBackup, "can use previous backup")
|
||||
|
||||
dps := cdps[test.scope.Category().PathType()]
|
||||
|
||||
// now do another backup with the previous delta tokens,
|
||||
// which should only contain the difference.
|
||||
collections, err = createCollections(
|
||||
ctx,
|
||||
handlers,
|
||||
suite.tenantID,
|
||||
inMock.NewProvider(userID, userID),
|
||||
test.scope,
|
||||
dps,
|
||||
control.Defaults(),
|
||||
func(status *support.ControllerOperationStatus) {},
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// TODO(keepers): this isn't a very useful test at the moment. It needs to
|
||||
// investigate the items in the original and delta collections to at least
|
||||
// assert some minimum assumptions, such as "deltas should retrieve fewer items".
|
||||
// Delta usage is commented out at the moment, anyway. So this is currently
|
||||
// a sanity check that the minimum behavior won't break.
|
||||
for _, coll := range collections {
|
||||
if coll.FullPath().Service() != path.ExchangeMetadataService {
|
||||
ec, ok := coll.(*Collection)
|
||||
require.True(t, ok, "collection is *Collection")
|
||||
assert.NotNil(t, ec)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestMailSerializationRegression verifies that all mail data stored in the
|
||||
// test account can be successfully downloaded into bytes and restored into
|
||||
// M365 mail objects
|
||||
func (suite *DataCollectionsIntegrationSuite) TestMailSerializationRegression() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
users = []string{suite.user}
|
||||
handlers = BackupHandlers(suite.ac)
|
||||
)
|
||||
|
||||
sel := selectors.NewExchangeBackup(users)
|
||||
sel.Include(sel.MailFolders([]string{DefaultMailFolder}, selectors.PrefixMatch()))
|
||||
|
||||
collections, err := createCollections(
|
||||
ctx,
|
||||
handlers,
|
||||
suite.tenantID,
|
||||
inMock.NewProvider(suite.user, suite.user),
|
||||
sel.Scopes()[0],
|
||||
DeltaPaths{},
|
||||
control.Defaults(),
|
||||
newStatusUpdater(t, &wg),
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
wg.Add(len(collections))
|
||||
|
||||
for _, edc := range collections {
|
||||
suite.Run(edc.FullPath().String(), func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
isMetadata := edc.FullPath().Service() == path.ExchangeMetadataService
|
||||
streamChannel := edc.Items(ctx, fault.New(true))
|
||||
|
||||
// Verify that each message can be restored
|
||||
for stream := range streamChannel {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
read, err := buf.ReadFrom(stream.ToReader())
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.NotZero(t, read)
|
||||
|
||||
if isMetadata {
|
||||
continue
|
||||
}
|
||||
|
||||
message, err := api.BytesToMessageable(buf.Bytes())
|
||||
assert.NotNil(t, message)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// TestContactSerializationRegression verifies ability to query contact items
|
||||
// and to store contact within Collection. Downloaded contacts are run through
|
||||
// a regression test to ensure that downloaded items can be uploaded.
|
||||
func (suite *DataCollectionsIntegrationSuite) TestContactSerializationRegression() {
|
||||
var (
|
||||
users = []string{suite.user}
|
||||
handlers = BackupHandlers(suite.ac)
|
||||
)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
scope selectors.ExchangeScope
|
||||
}{
|
||||
{
|
||||
name: "Default Contact Folder",
|
||||
scope: selectors.NewExchangeBackup(users).ContactFolders(
|
||||
[]string{DefaultContactFolder},
|
||||
selectors.PrefixMatch())[0],
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
edcs, err := createCollections(
|
||||
ctx,
|
||||
handlers,
|
||||
suite.tenantID,
|
||||
inMock.NewProvider(suite.user, suite.user),
|
||||
test.scope,
|
||||
DeltaPaths{},
|
||||
control.Defaults(),
|
||||
newStatusUpdater(t, &wg),
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
wg.Add(len(edcs))
|
||||
|
||||
require.GreaterOrEqual(t, len(edcs), 1, "expected 1 <= num collections <= 2")
|
||||
require.GreaterOrEqual(t, 2, len(edcs), "expected 1 <= num collections <= 2")
|
||||
|
||||
for _, edc := range edcs {
|
||||
isMetadata := edc.FullPath().Service() == path.ExchangeMetadataService
|
||||
count := 0
|
||||
|
||||
for stream := range edc.Items(ctx, fault.New(true)) {
|
||||
buf := &bytes.Buffer{}
|
||||
read, err := buf.ReadFrom(stream.ToReader())
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.NotZero(t, read)
|
||||
|
||||
if isMetadata {
|
||||
continue
|
||||
}
|
||||
|
||||
contact, err := api.BytesToContactable(buf.Bytes())
|
||||
assert.NotNil(t, contact)
|
||||
assert.NoError(t, err, "converting contact bytes: "+buf.String(), clues.ToCore(err))
|
||||
count++
|
||||
}
|
||||
|
||||
if isMetadata {
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
|
||||
// interface.
|
||||
if !assert.Implements(t, (*data.LocationPather)(nil), edc) {
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
edc.(data.LocationPather).LocationPath().String(),
|
||||
DefaultContactFolder)
|
||||
assert.NotZero(t, count)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestEventsSerializationRegression ensures functionality of createCollections
|
||||
// to be able to successfully query, download and restore event objects
|
||||
func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
users = []string{suite.user}
|
||||
handlers = BackupHandlers(suite.ac)
|
||||
calID string
|
||||
bdayID string
|
||||
)
|
||||
|
||||
fn := func(gcc graph.CachedContainer) error {
|
||||
if ptr.Val(gcc.GetDisplayName()) == DefaultCalendar {
|
||||
calID = ptr.Val(gcc.GetId())
|
||||
}
|
||||
|
||||
if ptr.Val(gcc.GetDisplayName()) == "Birthdays" {
|
||||
bdayID = ptr.Val(gcc.GetId())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := suite.ac.Events().EnumerateContainers(ctx, suite.user, DefaultCalendar, fn, fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
tests := []struct {
|
||||
name, expected string
|
||||
scope selectors.ExchangeScope
|
||||
}{
|
||||
{
|
||||
name: "Default Event Calendar",
|
||||
expected: calID,
|
||||
scope: selectors.NewExchangeBackup(users).EventCalendars(
|
||||
[]string{DefaultCalendar},
|
||||
selectors.PrefixMatch(),
|
||||
)[0],
|
||||
},
|
||||
{
|
||||
name: "Birthday Calendar",
|
||||
expected: bdayID,
|
||||
scope: selectors.NewExchangeBackup(users).EventCalendars(
|
||||
[]string{"Birthdays"},
|
||||
selectors.PrefixMatch(),
|
||||
)[0],
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
collections, err := createCollections(
|
||||
ctx,
|
||||
handlers,
|
||||
suite.tenantID,
|
||||
inMock.NewProvider(suite.user, suite.user),
|
||||
test.scope,
|
||||
DeltaPaths{},
|
||||
control.Defaults(),
|
||||
newStatusUpdater(t, &wg),
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.Len(t, collections, 2)
|
||||
|
||||
wg.Add(len(collections))
|
||||
|
||||
for _, edc := range collections {
|
||||
var isMetadata bool
|
||||
|
||||
if edc.FullPath().Service() != path.ExchangeMetadataService {
|
||||
isMetadata = true
|
||||
assert.Equal(t, test.expected, edc.FullPath().Folder(false))
|
||||
} else {
|
||||
assert.Equal(t, "", edc.FullPath().Folder(false))
|
||||
}
|
||||
|
||||
for item := range edc.Items(ctx, fault.New(true)) {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
read, err := buf.ReadFrom(item.ToReader())
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.NotZero(t, read)
|
||||
|
||||
if isMetadata {
|
||||
continue
|
||||
}
|
||||
|
||||
event, err := api.BytesToEventable(buf.Bytes())
|
||||
assert.NotNil(t, event)
|
||||
assert.NoError(t, err, "creating event from bytes: "+buf.String(), clues.ToCore(err))
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type CollectionPopulationSuite struct {
|
||||
tester.Suite
|
||||
creds account.M365Config
|
||||
}
|
||||
|
||||
func TestServiceIteratorsUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &ServiceIteratorsSuite{Suite: tester.NewUnitSuite(t)})
|
||||
suite.Run(t, &CollectionPopulationSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *ServiceIteratorsSuite) SetupSuite() {
|
||||
func (suite *CollectionPopulationSuite) SetupSuite() {
|
||||
a := tester.NewMockM365Account(suite.T())
|
||||
m365, err := a.M365Config()
|
||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
||||
suite.creds = m365
|
||||
}
|
||||
|
||||
func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() {
|
||||
func (suite *CollectionPopulationSuite) TestPopulateCollections() {
|
||||
var (
|
||||
qp = graph.QueryParams{
|
||||
Category: path.EmailCategory, // doesn't matter which one we use.
|
||||
ResourceOwner: inMock.NewProvider("user_id", "user_name"),
|
||||
TenantID: suite.creds.AzureTenantID,
|
||||
}
|
||||
statusUpdater = func(*support.ConnectorOperationStatus) {}
|
||||
statusUpdater = func(*support.ControllerOperationStatus) {}
|
||||
allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0]
|
||||
dps = DeltaPaths{} // incrementals are tested separately
|
||||
commonResult = mockGetterResults{
|
||||
@ -190,7 +924,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() {
|
||||
getter mockGetter
|
||||
resolver graph.ContainerResolver
|
||||
scope selectors.ExchangeScope
|
||||
failFast control.FailureBehavior
|
||||
failFast control.FailurePolicy
|
||||
expectErr assert.ErrorAssertionFunc
|
||||
expectNewColls int
|
||||
expectMetadataColls int
|
||||
@ -349,7 +1083,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() {
|
||||
category: qp.Category,
|
||||
}
|
||||
|
||||
collections, err := filterContainersAndFillCollections(
|
||||
collections, err := populateCollections(
|
||||
ctx,
|
||||
qp,
|
||||
mbh,
|
||||
@ -435,7 +1169,7 @@ func checkMetadata(
|
||||
assert.Equal(t, expect, catPaths[cat])
|
||||
}
|
||||
|
||||
func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_DuplicateFolders() {
|
||||
func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_DuplicateFolders() {
|
||||
type scopeCat struct {
|
||||
scope selectors.ExchangeScope
|
||||
cat path.CategoryType
|
||||
@ -447,7 +1181,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
|
||||
TenantID: suite.creds.AzureTenantID,
|
||||
}
|
||||
|
||||
statusUpdater = func(*support.ConnectorOperationStatus) {}
|
||||
statusUpdater = func(*support.ControllerOperationStatus) {}
|
||||
|
||||
dataTypes = []scopeCat{
|
||||
{
|
||||
@ -687,7 +1421,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
|
||||
category: qp.Category,
|
||||
}
|
||||
|
||||
collections, err := filterContainersAndFillCollections(
|
||||
collections, err := populateCollections(
|
||||
ctx,
|
||||
qp,
|
||||
mbh,
|
||||
@ -754,7 +1488,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_Dupli
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repeatedItems() {
|
||||
func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_repeatedItems() {
|
||||
newDelta := api.DeltaUpdate{URL: "delta_url"}
|
||||
|
||||
table := []struct {
|
||||
@ -832,7 +1566,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea
|
||||
ResourceOwner: inMock.NewProvider("user_id", "user_name"),
|
||||
TenantID: suite.creds.AzureTenantID,
|
||||
}
|
||||
statusUpdater = func(*support.ConnectorOperationStatus) {}
|
||||
statusUpdater = func(*support.ControllerOperationStatus) {}
|
||||
allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0]
|
||||
dps = DeltaPaths{} // incrementals are tested separately
|
||||
container1 = mockContainer{
|
||||
@ -851,7 +1585,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea
|
||||
require.Equal(t, "user_id", qp.ResourceOwner.ID(), qp.ResourceOwner)
|
||||
require.Equal(t, "user_name", qp.ResourceOwner.Name(), qp.ResourceOwner)
|
||||
|
||||
collections, err := filterContainersAndFillCollections(
|
||||
collections, err := populateCollections(
|
||||
ctx,
|
||||
qp,
|
||||
mbh,
|
||||
@ -907,7 +1641,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incrementals_nondelta() {
|
||||
func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_incrementals_nondelta() {
|
||||
var (
|
||||
userID = "user_id"
|
||||
tenantID = suite.creds.AzureTenantID
|
||||
@ -917,7 +1651,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre
|
||||
ResourceOwner: inMock.NewProvider("user_id", "user_name"),
|
||||
TenantID: suite.creds.AzureTenantID,
|
||||
}
|
||||
statusUpdater = func(*support.ConnectorOperationStatus) {}
|
||||
statusUpdater = func(*support.ControllerOperationStatus) {}
|
||||
allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0]
|
||||
commonResults = mockGetterResults{
|
||||
added: []string{"added"},
|
||||
@ -1270,7 +2004,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre
|
||||
}
|
||||
}
|
||||
|
||||
collections, err := filterContainersAndFillCollections(
|
||||
collections, err := populateCollections(
|
||||
ctx,
|
||||
qp,
|
||||
mbh,
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"github.com/alcionai/clues"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
)
|
||||
|
||||
// checkIDAndName is a helper function to ensure that
|
||||
@ -13,9 +13,9 @@ import (
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/support"
|
||||
"github.com/alcionai/corso/src/internal/observe"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
@ -125,7 +125,7 @@ func (col *Collection) LocationPath() *path.Builder {
|
||||
return col.locationPath
|
||||
}
|
||||
|
||||
// TODO(ashmrtn): Fill in with previous path once GraphConnector compares old
|
||||
// TODO(ashmrtn): Fill in with previous path once the Controller compares old
|
||||
// and new folder hierarchies.
|
||||
func (col Collection) PreviousPath() path.Path {
|
||||
return col.prevPath
|
||||
@ -230,7 +230,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
|
||||
atomic.AddInt64(&success, 1)
|
||||
log.With("err", err).Infow("item not found", clues.InErr(err).Slice()...)
|
||||
} else {
|
||||
errs.AddRecoverable(clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation))
|
||||
errs.AddRecoverable(ctx, clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation))
|
||||
}
|
||||
|
||||
return
|
||||
@ -238,7 +238,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
|
||||
|
||||
data, err := col.items.Serialize(ctx, item, user, id)
|
||||
if err != nil {
|
||||
errs.AddRecoverable(clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation))
|
||||
errs.AddRecoverable(ctx, clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation))
|
||||
return
|
||||
}
|
||||
|
||||
@ -11,8 +11,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
@ -46,20 +46,19 @@ func (mi *mockItemer) Serialize(
return nil, mi.serializeErr
}

type ExchangeDataCollectionSuite struct {
type CollectionSuite struct {
tester.Suite
}

func TestExchangeDataCollectionSuite(t *testing.T) {
suite.Run(t, &ExchangeDataCollectionSuite{Suite: tester.NewUnitSuite(t)})
func TestCollectionSuite(t *testing.T) {
suite.Run(t, &CollectionSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ExchangeDataCollectionSuite) TestExchangeDataReader_Valid() {
func (suite *CollectionSuite) TestReader_Valid() {
m := []byte("test message")
description := "aFile"
ed := &Stream{id: description, message: m}

// Read the message using the `ExchangeData` reader and validate it matches what we set
buf := &bytes.Buffer{}
_, err := buf.ReadFrom(ed.ToReader())
assert.NoError(suite.T(), err, clues.ToCore(err))
@ -67,7 +66,7 @@ func (suite *ExchangeDataCollectionSuite) TestExchangeDataReader_Valid() {
assert.Equal(suite.T(), description, ed.UUID())
}

func (suite *ExchangeDataCollectionSuite) TestExchangeDataReader_Empty() {
func (suite *CollectionSuite) TestReader_Empty() {
var (
empty []byte
expected int64
@ -82,7 +81,7 @@ func (suite *ExchangeDataCollectionSuite) TestExchangeDataReader_Empty() {
assert.NoError(t, err, clues.ToCore(err))
}

func (suite *ExchangeDataCollectionSuite) TestExchangeData_FullPath() {
func (suite *CollectionSuite) TestColleciton_FullPath() {
t := suite.T()
tenant := "a-tenant"
user := "a-user"
@ -105,7 +104,7 @@ func (suite *ExchangeDataCollectionSuite) TestExchangeData_FullPath() {
assert.Equal(t, fullPath, edc.FullPath())
}

func (suite *ExchangeDataCollectionSuite) TestExchangeDataCollection_NewExchangeDataCollection() {
func (suite *CollectionSuite) TestCollection_NewCollection() {
t := suite.T()
tenant := "a-tenant"
user := "a-user"
@ -129,7 +128,7 @@ func (suite *ExchangeDataCollectionSuite) TestExchangeDataCollection_NewExchange
assert.Equal(t, fullPath, edc.FullPath())
}

func (suite *ExchangeDataCollectionSuite) TestNewCollection_state() {
func (suite *CollectionSuite) TestNewCollection_state() {
fooP, err := path.Build("t", "u", path.ExchangeService, path.EmailCategory, false, "foo")
require.NoError(suite.T(), err, clues.ToCore(err))
barP, err := path.Build("t", "u", path.ExchangeService, path.EmailCategory, false, "bar")
@ -189,7 +188,7 @@ func (suite *ExchangeDataCollectionSuite) TestNewCollection_state() {
}
}

func (suite *ExchangeDataCollectionSuite) TestGetItemWithRetries() {
func (suite *CollectionSuite) TestGetItemWithRetries() {
table := []struct {
name string
items *mockItemer
@ -1,7 +1,7 @@
package exchange

import (
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

@ -32,7 +32,7 @@ func (h contactBackupHandler) itemHandler() itemGetterSerializer {
func (h contactBackupHandler) NewContainerCache(
userID string,
) (string, graph.ContainerResolver) {
return DefaultContactFolder, &contactFolderCache{
return DefaultContactFolder, &contactContainerCache{
userID: userID,
enumer: h.ac,
getter: h.ac,
@ -6,13 +6,13 @@ import (
"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)

var (
_ graph.ContainerResolver = &contactFolderCache{}
_ graph.ContainerResolver = &contactContainerCache{}
_ containerRefresher = &contactRefresher{}
)

@ -35,14 +35,14 @@ func (r *contactRefresher) refreshContainer(
return &f, nil
}

type contactFolderCache struct {
type contactContainerCache struct {
*containerResolver
enumer containersEnumerator
getter containerGetter
userID string
}

func (cfc *contactFolderCache) populateContactRoot(
func (cfc *contactContainerCache) populateContactRoot(
ctx context.Context,
directoryID string,
baseContainerPath []string,
@ -67,7 +67,7 @@ func (cfc *contactFolderCache) populateContactRoot(
// objects into the Contact Folder Cache
// Function does NOT use Delta Queries as it is not supported
// as of (Oct-07-2022)
func (cfc *contactFolderCache) Populate(
func (cfc *contactContainerCache) Populate(
ctx context.Context,
errs *fault.Bus,
baseID string,
@ -89,7 +89,7 @@ func (cfc *contactFolderCache) Populate(
return nil
}

func (cfc *contactFolderCache) init(
func (cfc *contactContainerCache) init(
ctx context.Context,
baseNode string,
baseContainerPath []string,
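The contactFolderCache to contactContainerCache rename above is one of three parallel renames in this commit; the event and mail caches below get the same treatment. All three share one shape, sketched here with the interface method sets assumed rather than taken from this diff:

// Hedged sketch of the common shape of the renamed *ContainerCache types.
// containersEnumerator and containerGetter are assumed interfaces backed by the api client.
type containerCache struct {
	*containerResolver          // embedded: ID <-> path resolution and caching
	enumer containersEnumerator // enumerates a user's child containers
	getter containerGetter      // fetches one container by its M365 ID
	userID string
}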
@ -7,7 +7,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
@ -31,7 +31,7 @@ func newContactRestoreHandler(
}

func (h contactRestoreHandler) newContainerCache(userID string) graph.ContainerResolver {
return &contactFolderCache{
return &contactContainerCache{
userID: userID,
enumer: h.ac,
getter: h.ac,
@ -9,6 +9,7 @@ import (

"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
@ -51,7 +52,7 @@ func (suite *ContactsRestoreIntgSuite) TestCreateContainerDestination() {
path.EmailCategory,
suite.creds.AzureTenantID,
suite.userID,
tester.DefaultTestRestoreDestination("").ContainerName,
testdata.DefaultRestoreConfig("").Location,
[]string{"Hufflepuff"},
[]string{"Ravenclaw"})
}
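This is the first of many call sites migrating from tester.DefaultTestRestoreDestination(...).ContainerName to testdata.DefaultRestoreConfig(...).Location. A hedged sketch of the new helper's use; the return type and the shape of Location are inferred from the call sites in this diff, not verified against the package:

// Assumed from call sites: DefaultRestoreConfig returns a control.RestoreConfig
// whose Location names the restore folder for the test run.
restoreCfg := testdata.DefaultRestoreConfig("contact")
folderName := restoreCfg.Location // used wherever ContainerName was used before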
@ -6,7 +6,7 @@ import (
"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
@ -403,7 +403,7 @@ func (cr *containerResolver) populatePaths(
_, err := cr.idToPath(ctx, ptr.Val(f.GetId()), 0)
if err != nil {
err = clues.Wrap(err, "populating path")
el.AddRecoverable(err)
el.AddRecoverable(ctx, err)
lastErr = err
}
}
@ -13,10 +13,12 @@ import (
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// ---------------------------------------------------------------------------
@ -671,6 +673,118 @@ func (suite *ConfiguredFolderCacheUnitSuite) TestAddToCache() {
assert.Equal(t, m.expectedLocation, l.String(), "location path")
}

type ContainerResolverSuite struct {
tester.Suite
credentials account.M365Config
}

func TestContainerResolverIntegrationSuite(t *testing.T) {
suite.Run(t, &ContainerResolverSuite{
Suite: tester.NewIntegrationSuite(
t,
[][]string{tester.M365AcctCredEnvs}),
})
}

func (suite *ContainerResolverSuite) SetupSuite() {
t := suite.T()

a := tester.NewM365Account(t)
m365, err := a.M365Config()
require.NoError(t, err, clues.ToCore(err))

suite.credentials = m365
}

func (suite *ContainerResolverSuite) TestPopulate() {
ac, err := api.NewClient(suite.credentials)
require.NoError(suite.T(), err, clues.ToCore(err))

eventFunc := func(t *testing.T) graph.ContainerResolver {
return &eventContainerCache{
userID: tester.M365UserID(t),
enumer: ac.Events(),
getter: ac.Events(),
}
}

contactFunc := func(t *testing.T) graph.ContainerResolver {
return &contactContainerCache{
userID: tester.M365UserID(t),
enumer: ac.Contacts(),
getter: ac.Contacts(),
}
}

tests := []struct {
name, folderInCache, root, basePath string
resolverFunc func(t *testing.T) graph.ContainerResolver
canFind assert.BoolAssertionFunc
}{
{
name: "Default Event Cache",
// Fine as long as this isn't running against a migrated Exchange server.
folderInCache: DefaultCalendar,
root: DefaultCalendar,
basePath: DefaultCalendar,
resolverFunc: eventFunc,
canFind: assert.True,
},
{
name: "Default Event Folder Hidden",
folderInCache: DefaultContactFolder,
root: DefaultCalendar,
canFind: assert.False,
resolverFunc: eventFunc,
},
{
name: "Name Not in Cache",
folderInCache: "testFooBarWhoBar",
root: DefaultCalendar,
canFind: assert.False,
resolverFunc: eventFunc,
},
{
name: "Default Contact Cache",
folderInCache: DefaultContactFolder,
root: DefaultContactFolder,
basePath: DefaultContactFolder,
canFind: assert.True,
resolverFunc: contactFunc,
},
{
name: "Default Contact Hidden",
folderInCache: DefaultContactFolder,
root: DefaultContactFolder,
canFind: assert.False,
resolverFunc: contactFunc,
},
{
name: "Name Not in Cache",
folderInCache: "testFooBarWhoBar",
root: DefaultContactFolder,
canFind: assert.False,
resolverFunc: contactFunc,
},
}
for _, test := range tests {
suite.Run(test.name, func() {
t := suite.T()

ctx, flush := tester.NewContext(t)
defer flush()

resolver := test.resolverFunc(t)

err := resolver.Populate(ctx, fault.New(true), test.root, test.basePath)
require.NoError(t, err, clues.ToCore(err))

_, isFound := resolver.LocationInCache(test.folderInCache)
test.canFind(t, isFound, "folder path", test.folderInCache)
})
}
}

// ---------------------------------------------------------------------------
// integration suite
// ---------------------------------------------------------------------------
@ -688,7 +802,7 @@ func runCreateDestinationTest(

var (
svc = path.ExchangeService
gcr = handler.newContainerCache(userID)
gcc = handler.newContainerCache(userID)
)

path1, err := path.Build(
@ -700,17 +814,17 @@ func runCreateDestinationTest(
containerNames1...)
require.NoError(t, err, clues.ToCore(err))

containerID, gcr, err := createDestination(
containerID, gcc, err := createDestination(
ctx,
handler,
handler.formatRestoreDestination(destinationName, path1),
userID,
gcr,
gcc,
true,
fault.New(true))
require.NoError(t, err, clues.ToCore(err))

_, _, err = gcr.IDToPath(ctx, containerID)
_, _, err = gcc.IDToPath(ctx, containerID)
assert.NoError(t, err, clues.ToCore(err))

path2, err := path.Build(
@ -722,22 +836,22 @@ func runCreateDestinationTest(
containerNames2...)
require.NoError(t, err, clues.ToCore(err))

containerID, gcr, err = createDestination(
containerID, gcc, err = createDestination(
ctx,
handler,
handler.formatRestoreDestination(destinationName, path2),
userID,
gcr,
gcc,
false,
fault.New(true))
require.NoError(t, err, clues.ToCore(err))

p, l, err := gcr.IDToPath(ctx, containerID)
p, l, err := gcc.IDToPath(ctx, containerID)
require.NoError(t, err, clues.ToCore(err))

_, ok := gcr.LocationInCache(l.String())
_, ok := gcc.LocationInCache(l.String())
require.True(t, ok, "looking for location in cache: %s", l)

_, ok = gcr.PathInCache(p.String())
_, ok = gcc.PathInCache(p.String())
require.True(t, ok, "looking for path in cache: %s", p)
}
@ -1,7 +1,7 @@
package exchange

import (
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

@ -32,7 +32,7 @@ func (h eventBackupHandler) itemHandler() itemGetterSerializer {
func (h eventBackupHandler) NewContainerCache(
userID string,
) (string, graph.ContainerResolver) {
return DefaultCalendar, &eventCalendarCache{
return DefaultCalendar, &eventContainerCache{
userID: userID,
enumer: h.ac,
getter: h.ac,
@ -6,14 +6,14 @@ import (
"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)

var _ graph.ContainerResolver = &eventCalendarCache{}
var _ graph.ContainerResolver = &eventContainerCache{}

type eventCalendarCache struct {
type eventContainerCache struct {
*containerResolver
enumer containersEnumerator
getter containerGetter
@ -23,7 +23,7 @@ type eventCalendarCache struct {
// init ensures that the structure's fields are initialized.
// Fields Initialized when cache == nil:
// [mc.cache]
func (ecc *eventCalendarCache) init(
func (ecc *eventContainerCache) init(
ctx context.Context,
) error {
if ecc.containerResolver == nil {
@ -37,7 +37,7 @@ func (ecc *eventCalendarCache) init(
// DefaultCalendar is the traditional "Calendar".
// Action ensures that cache will stop at appropriate level.
// @error iff the struct is not properly instantiated
func (ecc *eventCalendarCache) populateEventRoot(ctx context.Context) error {
func (ecc *eventContainerCache) populateEventRoot(ctx context.Context) error {
container := DefaultCalendar

f, err := ecc.getter.GetContainerByID(ctx, ecc.userID, container)
@ -59,7 +59,7 @@ func (ecc *eventCalendarCache) populateEventRoot(ctx context.Context) error {
// Populate utility function for populating eventCalendarCache.
// Executes 1 additional Graph Query
// @param baseID: ignored. Present to conform to interface
func (ecc *eventCalendarCache) Populate(
func (ecc *eventContainerCache) Populate(
ctx context.Context,
errs *fault.Bus,
baseID string,
@ -88,7 +88,7 @@ func (ecc *eventCalendarCache) Populate(

// AddToCache adds container to map in field 'cache'
// @returns error iff the required values are not accessible.
func (ecc *eventCalendarCache) AddToCache(ctx context.Context, f graph.Container) error {
func (ecc *eventContainerCache) AddToCache(ctx context.Context, f graph.Container) error {
if err := checkIDAndName(f); err != nil {
return clues.Wrap(err, "validating container").WithClues(ctx)
}
@ -7,7 +7,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
@ -33,7 +33,7 @@ func newEventRestoreHandler(
}

func (h eventRestoreHandler) newContainerCache(userID string) graph.ContainerResolver {
return &eventCalendarCache{
return &eventContainerCache{
userID: userID,
enumer: h.ac,
getter: h.ac,
@ -9,6 +9,7 @@ import (

"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
@ -51,7 +52,7 @@ func (suite *EventsRestoreIntgSuite) TestCreateContainerDestination() {
path.EmailCategory,
suite.creds.AzureTenantID,
suite.userID,
tester.DefaultTestRestoreDestination("").ContainerName,
testdata.DefaultRestoreConfig("").Location,
[]string{"Durmstrang"},
[]string{"Beauxbatons"})
}
@ -5,7 +5,7 @@ import (

"github.com/microsoft/kiota-abstractions-go/serialization"

"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
@ -1,7 +1,7 @@
package exchange

import (
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

@ -32,7 +32,7 @@ func (h mailBackupHandler) itemHandler() itemGetterSerializer {
func (h mailBackupHandler) NewContainerCache(
userID string,
) (string, graph.ContainerResolver) {
return rootFolderAlias, &mailFolderCache{
return rootFolderAlias, &mailContainerCache{
userID: userID,
enumer: h.ac,
getter: h.ac,
@ -5,13 +5,13 @@ import (

"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)

var (
_ graph.ContainerResolver = &mailFolderCache{}
_ graph.ContainerResolver = &mailContainerCache{}
_ containerRefresher = &mailRefresher{}
)

@ -34,10 +34,10 @@ func (r *mailRefresher) refreshContainer(
return &f, nil
}

// mailFolderCache struct used to improve lookup of directories within exchange.Mail
// mailContainerCache struct used to improve lookup of directories within exchange.Mail
// cache map of cachedContainers where the key = M365ID
// nameLookup map: Key: DisplayName Value: ID
type mailFolderCache struct {
type mailContainerCache struct {
*containerResolver
enumer containersEnumerator
getter containerGetter
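Per the doc comment above, the cache is keyed by M365 ID while nameLookup maps display names to IDs. A toy illustration of that two-map lookup idea, with placeholder values and the container stubbed as a plain string; this is not corso's actual implementation:

// Toy two-map lookup; IDs and the value type are placeholders.
nameLookup := map[string]string{"Inbox": "AAMkAD-example-id"} // DisplayName -> M365 ID
cache := map[string]string{"AAMkAD-example-id": "Inbox"}      // M365 ID -> cached container (stubbed)
if id, ok := nameLookup["Inbox"]; ok {
	_ = cache[id] // second hop: resolve the container by its ID
}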
@ -47,7 +47,7 @@ type mailFolderCache struct {
// init ensures that the structure's fields are initialized.
// Fields Initialized when cache == nil:
// [mc.cache]
func (mc *mailFolderCache) init(
func (mc *mailContainerCache) init(
ctx context.Context,
) error {
if mc.containerResolver == nil {
@ -64,7 +64,7 @@ func (mc *mailFolderCache) init(
// rootFolderAlias is the top-level directory for exchange.Mail.
// Action ensures that cache will stop at appropriate level.
// @error iff the struct is not properly instantiated
func (mc *mailFolderCache) populateMailRoot(ctx context.Context) error {
func (mc *mailContainerCache) populateMailRoot(ctx context.Context) error {
f, err := mc.getter.GetContainerByID(ctx, mc.userID, rootFolderAlias)
if err != nil {
return clues.Wrap(err, "fetching root folder")
@ -89,7 +89,7 @@ func (mc *mailFolderCache) populateMailRoot(ctx context.Context) error {
// @param baseID: M365ID of the base of the exchange.Mail.Folder
// @param baseContainerPath: the set of folder elements that make up the path
// for the base container in the cache.
func (mc *mailFolderCache) Populate(
func (mc *mailContainerCache) Populate(
ctx context.Context,
errs *fault.Bus,
baseID string,
@ -87,7 +87,7 @@ func (suite *MailFolderCacheIntegrationSuite) TestDeltaFetch() {

acm := ac.Mail()

mfc := mailFolderCache{
mfc := mailContainerCache{
userID: userID,
enumer: acm,
getter: acm,
@ -8,7 +8,7 @@ import (

"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
@ -34,7 +34,7 @@ func newMailRestoreHandler(
}

func (h mailRestoreHandler) newContainerCache(userID string) graph.ContainerResolver {
return &mailFolderCache{
return &mailContainerCache{
userID: userID,
enumer: h.ac,
getter: h.ac,
@ -9,6 +9,7 @@ import (

"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
@ -51,7 +52,7 @@ func (suite *MailRestoreIntgSuite) TestCreateContainerDestination() {
path.EmailCategory,
suite.creds.AzureTenantID,
suite.userID,
tester.DefaultTestRestoreDestination("").ContainerName,
testdata.DefaultRestoreConfig("").Location,
[]string{"Griffindor", "Croix"},
[]string{"Griffindor", "Felicius"})
}
@ -89,8 +89,6 @@ func NewContactCollection(pathRepresentation path.Path, numMessagesToReturn int)
Names: []string{},
}

rand.Seed(time.Now().UnixNano())

middleNames := []string{
"Argon",
"Bernard",
@ -140,6 +138,7 @@ func (medc *DataCollection) Items(
return res
}

// TODO: move to data/mock for service-agnostic mocking
// Data represents a single item retrieved from exchange
type Data struct {
ID string
@ -744,10 +744,10 @@ func serialize(t *testing.T, item serialization.Parsable) []byte {
return byteArray
}

func hydrateMessage(byteArray []byte) (models.Messageable, error) {
parseNode, err := kjson.NewJsonParseNodeFactory().GetRootParseNode("application/json", byteArray)
func hydrateMessage(body []byte) (models.Messageable, error) {
parseNode, err := kjson.NewJsonParseNodeFactory().GetRootParseNode("application/json", body)
if err != nil {
return nil, clues.Wrap(err, "deserializing bytes into base m365 object")
return nil, clues.Wrap(err, "deserializing bytes into base m365 object").With("body_size", len(body))
}

anObject, err := parseNode.GetObjectValue(models.CreateMessageFromDiscriminatorValue)
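The hydrateMessage change renames the parameter and, more usefully, attaches the payload size to the deserialization error through clues' With. A minimal sketch of that error-enrichment pattern; how the key/value pairs surface in logs is assumed, not shown in this diff:

// Sketch: clues carries structured key/value context on the error itself,
// so a downstream logger can report body_size without extra plumbing.
if err != nil {
	return nil, clues.Wrap(err, "deserializing bytes into base m365 object").
		With("body_size", len(body))
}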
@ -9,10 +9,10 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
@ -22,16 +22,16 @@ import (
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// RestoreCollections restores M365 objects in data.RestoreCollection to MSFT
// ConsumeRestoreCollections restores M365 objects in data.RestoreCollection to MSFT
// store through GraphAPI.
func RestoreCollections(
func ConsumeRestoreCollections(
ctx context.Context,
ac api.Client,
dest control.RestoreDestination,
restoreCfg control.RestoreConfig,
dcs []data.RestoreCollection,
deets *details.Builder,
errs *fault.Bus,
) (*support.ConnectorOperationStatus, error) {
) (*support.ControllerOperationStatus, error) {
if len(dcs) == 0 {
return support.CreateStatus(ctx, support.Restore, 0, support.CollectionMetrics{}, ""), nil
}
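Alongside the connector-to-m365 package move, the entry point becomes ConsumeRestoreCollections, the status type becomes support.ControllerOperationStatus, and control.RestoreDestination gives way to control.RestoreConfig. A hedged sketch of a call under the new signature; only the parameter order comes from this diff, every value below is a placeholder:

// Hypothetical caller; acClient, collections, and deets are placeholders.
status, err := ConsumeRestoreCollections(
	ctx,
	acClient,                                         // api.Client
	control.RestoreConfig{Location: "Corso_Restore"}, // replaces control.RestoreDestination
	collections,                                      // []data.RestoreCollection
	deets,                                            // *details.Builder
	fault.New(true))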
@ -64,7 +64,7 @@ func RestoreCollections(

handler, ok := handlers[category]
if !ok {
el.AddRecoverable(clues.New("unsupported restore path category").WithClues(ictx))
el.AddRecoverable(ctx, clues.New("unsupported restore path category").WithClues(ictx))
continue
}

@ -73,20 +73,20 @@ func RestoreCollections(
isNewCache = true
}

containerID, gcr, err := createDestination(
containerID, gcc, err := createDestination(
ictx,
handler,
handler.formatRestoreDestination(dest.ContainerName, dc.FullPath()),
handler.formatRestoreDestination(restoreCfg.Location, dc.FullPath()),
userID,
directoryCache[category],
isNewCache,
errs)
if err != nil {
el.AddRecoverable(err)
el.AddRecoverable(ctx, err)
continue
}

directoryCache[category] = gcr
directoryCache[category] = gcc

ictx = clues.Add(ictx, "restore_destination_id", containerID)

@ -107,7 +107,7 @@ func RestoreCollections(
break
}

el.AddRecoverable(err)
el.AddRecoverable(ctx, err)
}
}

@ -116,7 +116,7 @@ func RestoreCollections(
support.Restore,
len(dcs),
metrics,
dest.ContainerName)
restoreCfg.Location)

return status, el.Failure()
}
@ -131,7 +131,7 @@ func restoreCollection(
deets *details.Builder,
errs *fault.Bus,
) (support.CollectionMetrics, error) {
ctx, end := diagnostics.Span(ctx, "gc:exchange:restoreCollection", diagnostics.Label("path", dc.FullPath()))
ctx, end := diagnostics.Span(ctx, "m365:exchange:restoreCollection", diagnostics.Label("path", dc.FullPath()))
defer end()

var (
@ -159,14 +159,14 @@ func restoreCollection(
}

ictx := clues.Add(ctx, "item_id", itemData.UUID())
trace.Log(ictx, "gc:exchange:restoreCollection:item", itemData.UUID())
trace.Log(ictx, "m365:exchange:restoreCollection:item", itemData.UUID())
metrics.Objects++

buf := &bytes.Buffer{}

_, err := buf.ReadFrom(itemData.ToReader())
if err != nil {
el.AddRecoverable(clues.Wrap(err, "reading item bytes").WithClues(ictx))
el.AddRecoverable(ctx, clues.Wrap(err, "reading item bytes").WithClues(ictx))
continue
}

@ -174,7 +174,7 @@ func restoreCollection(

info, err := ir.restore(ictx, body, userID, destinationID, errs)
if err != nil {
el.AddRecoverable(err)
el.AddRecoverable(ictx, err)
continue
}

@ -185,7 +185,7 @@ func restoreCollection(
// destination folder, then the restore path no longer matches the fullPath.
itemPath, err := fullPath.AppendItem(itemData.UUID())
if err != nil {
el.AddRecoverable(clues.Wrap(err, "adding item to collection path").WithClues(ctx))
el.AddRecoverable(ctx, clues.Wrap(err, "adding item to collection path").WithClues(ctx))
continue
}

@ -331,7 +331,7 @@ func uploadAttachments(
itemID,
a)
if err != nil {
el.AddRecoverable(clues.Wrap(err, "uploading mail attachment").WithClues(ctx))
el.AddRecoverable(ctx, clues.Wrap(err, "uploading mail attachment").WithClues(ctx))
}
}

@ -10,9 +10,10 @@ import (
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/common/ptr"
exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
@ -54,7 +55,7 @@ func (suite *RestoreIntgSuite) TestRestoreContact() {

var (
userID = tester.M365UserID(t)
folderName = tester.DefaultTestRestoreDestination("contact").ContainerName
folderName = testdata.DefaultRestoreConfig("contact").Location
handler = newContactRestoreHandler(suite.ac)
)

@ -88,7 +89,7 @@ func (suite *RestoreIntgSuite) TestRestoreEvent() {

var (
userID = tester.M365UserID(t)
subject = tester.DefaultTestRestoreDestination("event").ContainerName
subject = testdata.DefaultRestoreConfig("event").Location
handler = newEventRestoreHandler(suite.ac)
)

@ -154,7 +155,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.MessageBytes("Restore Exchange Object"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("mailobj").ContainerName
folderName := testdata.DefaultRestoreConfig("mailobj").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -167,7 +168,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.MessageWithDirectAttachment("Restore 1 Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("mailwattch").ContainerName
folderName := testdata.DefaultRestoreConfig("mailwattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -180,7 +181,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.MessageWithItemAttachmentEvent("Event Item Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("eventwattch").ContainerName
folderName := testdata.DefaultRestoreConfig("eventwattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -193,7 +194,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.MessageWithItemAttachmentMail("Mail Item Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("mailitemattch").ContainerName
folderName := testdata.DefaultRestoreConfig("mailitemattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -201,24 +202,22 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
return ptr.Val(folder.GetId())
},
},
// TODO: Neha complete as part of https://github.com/alcionai/corso/issues/2428
// {
// name: "Test Mail: Hydrated Item Attachment Mail",
// bytes: exchMock.MessageWithNestedItemAttachmentMail(t,
// exchMock.MessageBytes("Basic Item Attachment"),
// "Mail Item Attachment",
// ),
// category: path.EmailCategory,
// destination: func(t *testing.T, ctx context.Context) string {
// folderName := tester.DefaultTestRestoreDestination("mailbasicattch").ContainerName
// folder, err := handlers[path.EmailCategory].
// CreateContainer(ctx, userID, folderName, "")
// require.NoError(t, err, clues.ToCore(err))
{
name: "Test Mail: Hydrated Item Attachment Mail",
bytes: exchMock.MessageWithNestedItemAttachmentMail(t,
exchMock.MessageBytes("Basic Item Attachment"),
"Mail Item Attachment",
),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := testdata.DefaultRestoreConfig("mailbasicattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))

// return ptr.Val(folder.GetId())
// },
// },
// vales here////
return ptr.Val(folder.GetId())
},
},
{
name: "Test Mail: Hydrated Item Attachment Mail One Attach",
bytes: exchMock.MessageWithNestedItemAttachmentMail(t,
@ -227,7 +226,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("mailnestattch").ContainerName
folderName := testdata.DefaultRestoreConfig("mailnestattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -244,7 +243,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("mailcontactattch").ContainerName
folderName := testdata.DefaultRestoreConfig("mailcontactattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -252,25 +251,25 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
return ptr.Val(folder.GetId())
},
},
// { // Restore will upload the Message without uploading the attachment
// name: "Test Mail: Item Attachment_NestedEvent",
// bytes: exchMock.MessageWithNestedItemAttachmentEvent("Nested Item Attachment"),
// category: path.EmailCategory,
// destination: func(t *testing.T, ctx context.Context) string {
// folderName := tester.DefaultTestRestoreDestination("nestedattch").ContainerName
// folder, err := handlers[path.EmailCategory].
// CreateContainer(ctx, userID, folderName, "")
// require.NoError(t, err, clues.ToCore(err))
{ // Restore will upload the Message without uploading the attachment
name: "Test Mail: Item Attachment_NestedEvent",
bytes: exchMock.MessageWithNestedItemAttachmentEvent("Nested Item Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := testdata.DefaultRestoreConfig("nestedattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))

// return ptr.Val(folder.GetId())
// },
// },
return ptr.Val(folder.GetId())
},
},
{
name: "Test Mail: One Large Attachment",
bytes: exchMock.MessageWithLargeAttachment("Restore Large Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("maillargeattch").ContainerName
folderName := testdata.DefaultRestoreConfig("maillargeattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -283,7 +282,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.MessageWithTwoAttachments("Restore 2 Attachments"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("mailtwoattch").ContainerName
folderName := testdata.DefaultRestoreConfig("mailtwoattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -296,7 +295,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.MessageWithOneDriveAttachment("Restore Reference(OneDrive) Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("mailrefattch").ContainerName
folderName := testdata.DefaultRestoreConfig("mailrefattch").Location
folder, err := handlers[path.EmailCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -309,7 +308,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.ContactBytes("Test_Omega"),
category: path.ContactsCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("contact").ContainerName
folderName := testdata.DefaultRestoreConfig("contact").Location
folder, err := handlers[path.ContactsCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -322,7 +321,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.EventBytes("Restored Event Object"),
category: path.EventsCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("event").ContainerName
folderName := testdata.DefaultRestoreConfig("event").Location
calendar, err := handlers[path.EventsCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -335,7 +334,7 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
bytes: exchMock.EventWithAttachment("Restored Event Attachment"),
category: path.EventsCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := tester.DefaultTestRestoreDestination("eventobj").ContainerName
folderName := testdata.DefaultRestoreConfig("eventobj").Location
calendar, err := handlers[path.EventsCategory].
CreateContainer(ctx, userID, folderName, "")
require.NoError(t, err, clues.ToCore(err))
@ -7,8 +7,8 @@ import (
"github.com/alcionai/clues"
"github.com/stretchr/testify/require"

"github.com/alcionai/corso/src/internal/connector/exchange"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/exchange"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/common/ptr"
exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
@ -8,7 +8,7 @@ import (
ktext "github.com/microsoft/kiota-serialization-text-go"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"

i1a3c1a5501c5e41b7fd169f2d4c768dce9b096ac28fb5431bf02afcc57295411 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites"
i1a3c1a5501c5e41b7fd169f2d4c768dce9b096ac28fb5431bf02afcc57295411 "github.com/alcionai/corso/src/internal/m365/graph/betasdk/sites"
)

// BetaClient the main entry point of the SDK, exposes the configuration and the fluent API.
@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
)
@ -2,7 +2,7 @@
"lockFileVersion": "1.0.0",
"kiotaVersion": "0.10.0.0",
"clientClassName": "BetaClient",
"clientNamespaceName": "github.com/alcionai/corso/src/internal/connector/graph/betasdk",
"clientNamespaceName": "github.com/alcionai/corso/src/internal/m365/graph/betasdk",
"language": "Go",
"betaVersion": "0.53.0",
"usesBackingStore": false,
Some files were not shown because too many files have changed in this diff.