merge commit

This commit is contained in:
neha-Gupta1 2023-08-23 14:45:30 +05:30
commit fdd67bdaa7
14 changed files with 672 additions and 234 deletions

View File

@ -39,6 +39,7 @@ var serviceCommands = []func(cmd *cobra.Command) *cobra.Command{
addExchangeCommands,
addOneDriveCommands,
addSharePointCommands,
addGroupsCommands,
addTeamsCommands,
}

View File

@ -1,14 +1,27 @@
package backup
import (
"context"
"errors"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/cli/flags"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/repo"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/filters"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/repository"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/services/m365"
)
// ------------------------------------------------------------------------------------------------
@ -134,7 +147,38 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error {
return nil
}
return Only(ctx, utils.ErrNotYetImplemented)
if err := validateGroupsBackupCreateFlags(flags.GroupFV, flags.CategoryDataFV); err != nil {
return err
}
r, acct, err := utils.AccountConnectAndWriteRepoConfig(ctx, path.GroupsService, repo.S3Overrides(cmd))
if err != nil {
return Only(ctx, err)
}
defer utils.CloseRepo(ctx, r)
// TODO: log/print recoverable errors
errs := fault.New(false)
ins, err := m365.GroupsMap(ctx, *acct, errs)
if err != nil {
return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 groups"))
}
sel := groupsBackupCreateSelectors(ctx, ins, flags.GroupFV, flags.CategoryDataFV)
selectorSet := []selectors.Selector{}
for _, discSel := range sel.SplitByResourceOwner(ins.IDs()) {
selectorSet = append(selectorSet, discSel.Selector)
}
return runBackups(
ctx,
r,
"Group", "group",
selectorSet,
ins)
}
// ------------------------------------------------------------------------------------------------
@ -172,17 +216,71 @@ func groupsDetailsCmd() *cobra.Command {
// processes a groups service backup.
func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
if utils.HasNoFlagsAndShownHelp(cmd) {
return nil
}
if err := validateGroupBackupCreateFlags(flags.GroupFV); err != nil {
ctx := cmd.Context()
opts := utils.MakeGroupsOpts(cmd)
r, _, _, ctrlOpts, err := utils.GetAccountAndConnect(ctx, path.GroupsService, repo.S3Overrides(cmd))
if err != nil {
return Only(ctx, err)
}
return Only(ctx, utils.ErrNotYetImplemented)
defer utils.CloseRepo(ctx, r)
ds, err := runDetailsGroupsCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce)
if err != nil {
return Only(ctx, err)
}
if len(ds.Entries) == 0 {
Info(ctx, selectors.ErrorNoMatchingItems)
return nil
}
ds.PrintEntries(ctx)
return nil
}
// runDetailsGroupsCmd performs the lookup in backup details and, unless
// skipReduce is set, reduces the result to the entries matched by opts.
// Any hard failure recorded while fetching details is surfaced as the
// returned error; recoverable errors are currently not reported (see TODO).
func runDetailsGroupsCmd(
	ctx context.Context,
	r repository.BackupGetter,
	backupID string,
	opts utils.GroupsOpts,
	skipReduce bool,
) (*details.Details, error) {
	// Validate here (not only in the cobra handler) so every caller of this
	// helper gets the same flag checks.
	if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil {
		return nil, err
	}

	ctx = clues.Add(ctx, "backup_id", backupID)

	d, _, errs := r.GetBackupDetails(ctx, backupID)
	// TODO: log/track recoverable errors
	if errs.Failure() != nil {
		// Distinguish "backup id does not exist" from other repository errors.
		if errors.Is(errs.Failure(), data.ErrNotFound) {
			return nil, clues.New("no backup exists with the id " + backupID)
		}

		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
	}

	ctx = clues.Add(ctx, "details_entries", len(d.Entries))

	if !skipReduce {
		// Reduce matches on item names (not IDs) per the selector config below.
		sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
		utils.FilterGroupsRestoreInfoSelectors(sel, opts)
		d = sel.Reduce(ctx, d, errs)
	}

	return d, nil
}
// ------------------------------------------------------------------------------------------------
@ -208,7 +306,7 @@ func deleteGroupsCmd(cmd *cobra.Command, args []string) error {
// helpers
// ---------------------------------------------------------------------------
func validateGroupBackupCreateFlags(groups []string) error {
func validateGroupsBackupCreateFlags(groups, cats []string) error {
if len(groups) == 0 {
return clues.New(
"requires one or more --" +
@ -228,3 +326,40 @@ func validateGroupBackupCreateFlags(groups []string) error {
return nil
}
// groupsBackupCreateSelectors assembles the backup selector for the
// requested groups. A wildcard group entry expands to every group in the
// id-name cache.
// TODO: users might specify a data type, this only supports AllData().
func groupsBackupCreateSelectors(
	ctx context.Context,
	ins idname.Cacher,
	group, cats []string,
) *selectors.GroupsBackup {
	wildcarded := filters.PathContains(group).Compare(flags.Wildcard)
	if wildcarded {
		// "*" was requested: back up all known groups.
		return includeAllGroupWithCategories(ins, cats)
	}

	// Otherwise scope the selector to the explicitly named groups.
	sel := selectors.NewGroupsBackup(slices.Clone(group))

	return addGroupsCategories(sel, cats)
}
// includeAllGroupWithCategories builds a backup selector that covers every
// group id known to the cache, scoped to the provided categories.
func includeAllGroupWithCategories(ins idname.Cacher, categories []string) *selectors.GroupsBackup {
	sel := selectors.NewGroupsBackup(ins.IDs())

	return addGroupsCategories(sel, categories)
}
// addGroupsCategories applies category-based inclusion scopes to the
// selector. Only the empty-category case is currently handled: it includes
// all data.
// NOTE(review): when cats is non-empty no Include is added at all, leaving
// the selector without inclusions until the TODO below is implemented —
// confirm this is the intended interim behavior.
func addGroupsCategories(sel *selectors.GroupsBackup, cats []string) *selectors.GroupsBackup {
	if len(cats) == 0 {
		sel.Include(sel.AllData())
	}

	// TODO(meain): handle filtering
	// for _, d := range cats {
	// 	switch d {
	// 	case dataLibraries:
	// 		sel.Include(sel.LibraryFolders(selectors.Any()))
	// 	case dataPages:
	// 		sel.Include(sel.Pages(selectors.Any()))
	// 	}
	// }

	return sel
}

View File

@ -1,9 +1,13 @@
package utils
import (
"context"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"github.com/alcionai/corso/src/cli/flags"
"github.com/alcionai/corso/src/pkg/selectors"
)
type GroupsOpts struct {
@ -28,3 +32,56 @@ func MakeGroupsOpts(cmd *cobra.Command) GroupsOpts {
Populated: flags.GetPopulatedFlags(cmd),
}
}
// ValidateGroupsRestoreFlags checks common flags for correctness and
// interdependencies.
func ValidateGroupsRestoreFlags(backupID string, opts GroupsOpts) error {
	if backupID == "" {
		return clues.New("a backup ID is required")
	}

	// TODO(meain): selectors (refer sharepoint)

	// Remaining restore-config checks are shared across services.
	return validateRestoreConfigFlags(flags.CollisionsFV, opts.RestoreCfg)
}
// AddGroupInfo adds the scope of the provided values to the selector's
// filter set. Empty values are ignored.
func AddGroupInfo(
	sel *selectors.GroupsRestore,
	v string,
	f func(string) []selectors.GroupsScope,
) {
	if len(v) > 0 {
		sel.Filter(f(v))
	}
}
// IncludeGroupsRestoreDataSelectors builds the common data-selector
// inclusions for Group commands. An empty group list matches any group.
func IncludeGroupsRestoreDataSelectors(ctx context.Context, opts GroupsOpts) *selectors.GroupsRestore {
	targets := opts.Groups
	if len(targets) == 0 {
		targets = selectors.Any()
	}

	sel := selectors.NewGroupsRestore(targets)

	// TODO(meain): add selectors
	sel.Include(sel.AllData())

	return sel
}
// FilterGroupsRestoreInfoSelectors builds the common info-selector filters.
// Currently a no-op: no groups info-filter flags are wired up yet.
func FilterGroupsRestoreInfoSelectors(
	sel *selectors.GroupsRestore,
	opts GroupsOpts,
) {
	// TODO(meain)
	// AddGroupInfo(sel, opts.GroupID, sel.Library)
}

View File

@ -0,0 +1,161 @@
package utils_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/tester"
)
// GroupsUtilsSuite exercises the groups CLI utility helpers.
type GroupsUtilsSuite struct {
	tester.Suite
}

// TestGroupsUtilsSuite registers the suite with the test runner.
func TestGroupsUtilsSuite(t *testing.T) {
	suite.Run(t, &GroupsUtilsSuite{Suite: tester.NewUnitSuite(t)})
}
// TestIncludeGroupsRestoreDataSelectors verifies that the groups restore
// data-selector builder produces the expected number of inclusions for
// empty, single, and multiple group inputs.
func (suite *GroupsUtilsSuite) TestIncludeGroupsRestoreDataSelectors() {
	var (
		empty  = []string{}
		single = []string{"single"}
		multi  = []string{"more", "than", "one"}
	)

	table := []struct {
		name             string
		opts             utils.GroupsOpts
		expectIncludeLen int
	}{
		{
			name:             "no inputs",
			opts:             utils.GroupsOpts{},
			expectIncludeLen: 2,
		},
		{
			name: "empty",
			opts: utils.GroupsOpts{
				Groups: empty,
			},
			expectIncludeLen: 2,
		},
		{
			name: "single inputs",
			opts: utils.GroupsOpts{
				Groups: single,
			},
			expectIncludeLen: 2,
		},
		{
			name: "multi inputs",
			opts: utils.GroupsOpts{
				Groups: multi,
			},
			expectIncludeLen: 2,
		},
		// TODO Add library specific tests once we have filters based
		// on library folders
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			sel := utils.IncludeGroupsRestoreDataSelectors(ctx, test.opts)

			// Assert against the subtest's own t (was suite.T(), which
			// attributes failures to the outer test).
			assert.Len(t, sel.Includes, test.expectIncludeLen)
		})
	}
}
// TestValidateGroupsRestoreFlags checks that restore-flag validation
// requires a backup ID and otherwise accepts empty options.
func (suite *GroupsUtilsSuite) TestValidateGroupsRestoreFlags() {
	table := []struct {
		name     string
		backupID string
		opts     utils.GroupsOpts
		expect   assert.ErrorAssertionFunc
	}{
		{
			name:     "no opts",
			backupID: "id",
			opts:     utils.GroupsOpts{},
			expect:   assert.NoError,
		},
		{
			// missing backup ID is the only current failure mode
			name:     "no backupID",
			backupID: "",
			opts:     utils.GroupsOpts{},
			expect:   assert.Error,
		},
		// TODO: Add tests for selectors once we have them
		// {
		// 	name:     "all valid",
		// 	backupID: "id",
		// 	opts: utils.GroupsOpts{
		// 		Populated: flags.PopulatedFlags{
		// 			flags.FileCreatedAfterFN:   struct{}{},
		// 			flags.FileCreatedBeforeFN:  struct{}{},
		// 			flags.FileModifiedAfterFN:  struct{}{},
		// 			flags.FileModifiedBeforeFN: struct{}{},
		// 		},
		// 	},
		// 	expect: assert.NoError,
		// },
		// {
		// 	name:     "invalid file created after",
		// 	backupID: "id",
		// 	opts: utils.GroupsOpts{
		// 		FileCreatedAfter: "1235",
		// 		Populated: flags.PopulatedFlags{
		// 			flags.FileCreatedAfterFN: struct{}{},
		// 		},
		// 	},
		// 	expect: assert.Error,
		// },
		// {
		// 	name:     "invalid file created before",
		// 	backupID: "id",
		// 	opts: utils.GroupsOpts{
		// 		FileCreatedBefore: "1235",
		// 		Populated: flags.PopulatedFlags{
		// 			flags.FileCreatedBeforeFN: struct{}{},
		// 		},
		// 	},
		// 	expect: assert.Error,
		// },
		// {
		// 	name:     "invalid file modified after",
		// 	backupID: "id",
		// 	opts: utils.GroupsOpts{
		// 		FileModifiedAfter: "1235",
		// 		Populated: flags.PopulatedFlags{
		// 			flags.FileModifiedAfterFN: struct{}{},
		// 		},
		// 	},
		// 	expect: assert.Error,
		// },
		// {
		// 	name:     "invalid file modified before",
		// 	backupID: "id",
		// 	opts: utils.GroupsOpts{
		// 		FileModifiedBefore: "1235",
		// 		Populated: flags.PopulatedFlags{
		// 			flags.FileModifiedBeforeFN: struct{}{},
		// 		},
		// 	},
		// 	expect: assert.Error,
		// },
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			test.expect(t, utils.ValidateGroupsRestoreFlags(test.backupID, test.opts))
		})
	}
}

View File

@ -8,7 +8,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1
github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4
github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go v1.44.328
github.com/aws/aws-sdk-go v1.44.329
github.com/aws/aws-xray-sdk-go v1.8.1
github.com/cenkalti/backoff/v4 v4.2.1
github.com/google/uuid v1.3.1

View File

@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.44.328 h1:WBwlf8ym9SDQ/GTIBO9eXyvwappKJyOetWJKl4mT7ZU=
github.com/aws/aws-sdk-go v1.44.328/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.329 h1:Rqy+wYI8h+iq+FphR59KKTsHR1Lz7YiwRqFzWa7xoYU=
github.com/aws/aws-sdk-go v1.44.329/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=

View File

@ -137,89 +137,113 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
backupTag, _ := makeTagKV(TagBackupCategory)
// Current backup and snapshots.
bupCurrent := &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("current-bup-id"),
ModelStoreID: manifest.ID("current-bup-msid"),
},
SnapshotID: "current-snap-msid",
StreamStoreID: "current-deets-msid",
bupCurrent := func() *backup.Backup {
return &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("current-bup-id"),
ModelStoreID: manifest.ID("current-bup-msid"),
},
SnapshotID: "current-snap-msid",
StreamStoreID: "current-deets-msid",
}
}
snapCurrent := &manifest.EntryMetadata{
ID: "current-snap-msid",
Labels: map[string]string{
backupTag: "0",
},
snapCurrent := func() *manifest.EntryMetadata {
return &manifest.EntryMetadata{
ID: "current-snap-msid",
Labels: map[string]string{
backupTag: "0",
},
}
}
deetsCurrent := &manifest.EntryMetadata{
ID: "current-deets-msid",
deetsCurrent := func() *manifest.EntryMetadata {
return &manifest.EntryMetadata{
ID: "current-deets-msid",
}
}
// Legacy backup with details in separate model.
bupLegacy := &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("legacy-bup-id"),
ModelStoreID: manifest.ID("legacy-bup-msid"),
},
SnapshotID: "legacy-snap-msid",
DetailsID: "legacy-deets-msid",
bupLegacy := func() *backup.Backup {
return &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("legacy-bup-id"),
ModelStoreID: manifest.ID("legacy-bup-msid"),
},
SnapshotID: "legacy-snap-msid",
DetailsID: "legacy-deets-msid",
}
}
snapLegacy := &manifest.EntryMetadata{
ID: "legacy-snap-msid",
Labels: map[string]string{
backupTag: "0",
},
snapLegacy := func() *manifest.EntryMetadata {
return &manifest.EntryMetadata{
ID: "legacy-snap-msid",
Labels: map[string]string{
backupTag: "0",
},
}
}
deetsLegacy := &model.BaseModel{
ID: "legacy-deets-id",
ModelStoreID: "legacy-deets-msid",
deetsLegacy := func() *model.BaseModel {
return &model.BaseModel{
ID: "legacy-deets-id",
ModelStoreID: "legacy-deets-msid",
}
}
// Incomplete backup missing data snapshot.
bupNoSnapshot := &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("ns-bup-id"),
ModelStoreID: manifest.ID("ns-bup-id-msid"),
},
StreamStoreID: "ns-deets-msid",
bupNoSnapshot := func() *backup.Backup {
return &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("ns-bup-id"),
ModelStoreID: manifest.ID("ns-bup-id-msid"),
},
StreamStoreID: "ns-deets-msid",
}
}
deetsNoSnapshot := &manifest.EntryMetadata{
ID: "ns-deets-msid",
deetsNoSnapshot := func() *manifest.EntryMetadata {
return &manifest.EntryMetadata{
ID: "ns-deets-msid",
}
}
// Legacy incomplete backup missing data snapshot.
bupLegacyNoSnapshot := &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("ns-legacy-bup-id"),
ModelStoreID: manifest.ID("ns-legacy-bup-id-msid"),
},
DetailsID: "ns-legacy-deets-msid",
bupLegacyNoSnapshot := func() *backup.Backup {
return &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("ns-legacy-bup-id"),
ModelStoreID: manifest.ID("ns-legacy-bup-id-msid"),
},
DetailsID: "ns-legacy-deets-msid",
}
}
deetsLegacyNoSnapshot := &model.BaseModel{
ID: "ns-legacy-deets-id",
ModelStoreID: "ns-legacy-deets-msid",
deetsLegacyNoSnapshot := func() *model.BaseModel {
return &model.BaseModel{
ID: "ns-legacy-deets-id",
ModelStoreID: "ns-legacy-deets-msid",
}
}
// Incomplete backup missing details.
bupNoDetails := &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("nssid-bup-id"),
ModelStoreID: manifest.ID("nssid-bup-msid"),
},
SnapshotID: "nssid-snap-msid",
bupNoDetails := func() *backup.Backup {
return &backup.Backup{
BaseModel: model.BaseModel{
ID: model.StableID("nssid-bup-id"),
ModelStoreID: manifest.ID("nssid-bup-msid"),
},
SnapshotID: "nssid-snap-msid",
}
}
snapNoDetails := &manifest.EntryMetadata{
ID: "nssid-snap-msid",
Labels: map[string]string{
backupTag: "0",
},
snapNoDetails := func() *manifest.EntryMetadata {
return &manifest.EntryMetadata{
ID: "nssid-snap-msid",
Labels: map[string]string{
backupTag: "0",
},
}
}
// Get some stable time so that we can do everything relative to this in the
@ -268,16 +292,16 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "OnlyCompleteBackups Noops",
snapshots: []*manifest.EntryMetadata{
snapCurrent,
deetsCurrent,
snapLegacy,
snapCurrent(),
deetsCurrent(),
snapLegacy(),
},
detailsModels: []*model.BaseModel{
deetsLegacy,
deetsLegacy(),
},
backups: []backupRes{
{bup: bupCurrent},
{bup: bupLegacy},
{bup: bupCurrent()},
{bup: bupLegacy()},
},
time: baseTime,
expectErr: assert.NoError,
@ -285,24 +309,24 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "MissingFieldsInBackup CausesCleanup",
snapshots: []*manifest.EntryMetadata{
snapNoDetails,
deetsNoSnapshot,
snapNoDetails(),
deetsNoSnapshot(),
},
detailsModels: []*model.BaseModel{
deetsLegacyNoSnapshot,
deetsLegacyNoSnapshot(),
},
backups: []backupRes{
{bup: bupNoSnapshot},
{bup: bupLegacyNoSnapshot},
{bup: bupNoDetails},
{bup: bupNoSnapshot()},
{bup: bupLegacyNoSnapshot()},
{bup: bupNoDetails()},
},
expectDeleteIDs: []manifest.ID{
manifest.ID(bupNoSnapshot.ModelStoreID),
manifest.ID(bupLegacyNoSnapshot.ModelStoreID),
manifest.ID(bupNoDetails.ModelStoreID),
manifest.ID(deetsLegacyNoSnapshot.ModelStoreID),
snapNoDetails.ID,
deetsNoSnapshot.ID,
manifest.ID(bupNoSnapshot().ModelStoreID),
manifest.ID(bupLegacyNoSnapshot().ModelStoreID),
manifest.ID(bupNoDetails().ModelStoreID),
manifest.ID(deetsLegacyNoSnapshot().ModelStoreID),
snapNoDetails().ID,
deetsNoSnapshot().ID,
},
time: baseTime,
expectErr: assert.NoError,
@ -310,20 +334,20 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "MissingSnapshot CausesCleanup",
snapshots: []*manifest.EntryMetadata{
deetsCurrent,
deetsCurrent(),
},
detailsModels: []*model.BaseModel{
deetsLegacy,
deetsLegacy(),
},
backups: []backupRes{
{bup: bupCurrent},
{bup: bupLegacy},
{bup: bupCurrent()},
{bup: bupLegacy()},
},
expectDeleteIDs: []manifest.ID{
manifest.ID(bupCurrent.ModelStoreID),
deetsCurrent.ID,
manifest.ID(bupLegacy.ModelStoreID),
manifest.ID(deetsLegacy.ModelStoreID),
manifest.ID(bupCurrent().ModelStoreID),
deetsCurrent().ID,
manifest.ID(bupLegacy().ModelStoreID),
manifest.ID(deetsLegacy().ModelStoreID),
},
time: baseTime,
expectErr: assert.NoError,
@ -331,38 +355,39 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "MissingDetails CausesCleanup",
snapshots: []*manifest.EntryMetadata{
snapCurrent,
snapLegacy,
snapCurrent(),
snapLegacy(),
},
backups: []backupRes{
{bup: bupCurrent},
{bup: bupLegacy},
{bup: bupCurrent()},
{bup: bupLegacy()},
},
expectDeleteIDs: []manifest.ID{
manifest.ID(bupCurrent.ModelStoreID),
manifest.ID(bupLegacy.ModelStoreID),
snapCurrent.ID,
snapLegacy.ID,
manifest.ID(bupCurrent().ModelStoreID),
manifest.ID(bupLegacy().ModelStoreID),
snapCurrent().ID,
snapLegacy().ID,
},
time: baseTime,
expectErr: assert.NoError,
},
// Tests with various errors from Storer.
{
name: "SnapshotsListError Fails",
snapshotFetchErr: assert.AnError,
backups: []backupRes{
{bup: bupCurrent},
{bup: bupCurrent()},
},
expectErr: assert.Error,
},
{
name: "LegacyDetailsListError Fails",
snapshots: []*manifest.EntryMetadata{
snapCurrent,
snapCurrent(),
},
detailsModelListErr: assert.AnError,
backups: []backupRes{
{bup: bupCurrent},
{bup: bupCurrent()},
},
time: baseTime,
expectErr: assert.Error,
@ -370,8 +395,8 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "BackupIDsListError Fails",
snapshots: []*manifest.EntryMetadata{
snapCurrent,
deetsCurrent,
snapCurrent(),
deetsCurrent(),
},
backupListErr: assert.AnError,
time: baseTime,
@ -380,22 +405,22 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "BackupModelGetErrorNotFound CausesCleanup",
snapshots: []*manifest.EntryMetadata{
snapCurrent,
deetsCurrent,
snapLegacy,
snapNoDetails,
snapCurrent(),
deetsCurrent(),
snapLegacy(),
snapNoDetails(),
},
detailsModels: []*model.BaseModel{
deetsLegacy,
deetsLegacy(),
},
backups: []backupRes{
{bup: bupCurrent},
{bup: bupCurrent()},
{
bup: bupLegacy,
bup: bupLegacy(),
err: data.ErrNotFound,
},
{
bup: bupNoDetails,
bup: bupNoDetails(),
err: data.ErrNotFound,
},
},
@ -404,11 +429,11 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
// delete operation should ignore missing models though so there's no
// issue.
expectDeleteIDs: []manifest.ID{
snapLegacy.ID,
manifest.ID(deetsLegacy.ModelStoreID),
manifest.ID(bupLegacy.ModelStoreID),
snapNoDetails.ID,
manifest.ID(bupNoDetails.ModelStoreID),
snapLegacy().ID,
manifest.ID(deetsLegacy().ModelStoreID),
manifest.ID(bupLegacy().ModelStoreID),
snapNoDetails().ID,
manifest.ID(bupNoDetails().ModelStoreID),
},
time: baseTime,
expectErr: assert.NoError,
@ -416,21 +441,21 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "BackupModelGetError Fails",
snapshots: []*manifest.EntryMetadata{
snapCurrent,
deetsCurrent,
snapLegacy,
snapNoDetails,
snapCurrent(),
deetsCurrent(),
snapLegacy(),
snapNoDetails(),
},
detailsModels: []*model.BaseModel{
deetsLegacy,
deetsLegacy(),
},
backups: []backupRes{
{bup: bupCurrent},
{bup: bupCurrent()},
{
bup: bupLegacy,
bup: bupLegacy(),
err: assert.AnError,
},
{bup: bupNoDetails},
{bup: bupNoDetails()},
},
time: baseTime,
expectErr: assert.Error,
@ -438,34 +463,35 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "DeleteError Fails",
snapshots: []*manifest.EntryMetadata{
snapCurrent,
deetsCurrent,
snapLegacy,
snapNoDetails,
snapCurrent(),
deetsCurrent(),
snapLegacy(),
snapNoDetails(),
},
detailsModels: []*model.BaseModel{
deetsLegacy,
deetsLegacy(),
},
backups: []backupRes{
{bup: bupCurrent},
{bup: bupLegacy},
{bup: bupNoDetails},
{bup: bupCurrent()},
{bup: bupLegacy()},
{bup: bupNoDetails()},
},
expectDeleteIDs: []manifest.ID{
snapNoDetails.ID,
manifest.ID(bupNoDetails.ModelStoreID),
snapNoDetails().ID,
manifest.ID(bupNoDetails().ModelStoreID),
},
deleteErr: assert.AnError,
time: baseTime,
expectErr: assert.Error,
},
// Tests dealing with buffer times.
{
name: "MissingSnapshot BarelyTooYoungForCleanup Noops",
snapshots: []*manifest.EntryMetadata{
manifestWithTime(baseTime, deetsCurrent),
manifestWithTime(baseTime, deetsCurrent()),
},
backups: []backupRes{
{bup: backupWithTime(baseTime, bupCurrent)},
{bup: backupWithTime(baseTime, bupCurrent())},
},
time: baseTime.Add(24 * time.Hour),
buffer: 24 * time.Hour,
@ -474,14 +500,14 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "MissingSnapshot BarelyOldEnough CausesCleanup",
snapshots: []*manifest.EntryMetadata{
manifestWithTime(baseTime, deetsCurrent),
manifestWithTime(baseTime, deetsCurrent()),
},
backups: []backupRes{
{bup: backupWithTime(baseTime, bupCurrent)},
{bup: backupWithTime(baseTime, bupCurrent())},
},
expectDeleteIDs: []manifest.ID{
deetsCurrent.ID,
manifest.ID(bupCurrent.ModelStoreID),
deetsCurrent().ID,
manifest.ID(bupCurrent().ModelStoreID),
},
time: baseTime.Add((24 * time.Hour) + time.Second),
buffer: 24 * time.Hour,
@ -490,12 +516,12 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
{
name: "BackupGetErrorNotFound TooYoung Noops",
snapshots: []*manifest.EntryMetadata{
manifestWithTime(baseTime, snapCurrent),
manifestWithTime(baseTime, deetsCurrent),
manifestWithTime(baseTime, snapCurrent()),
manifestWithTime(baseTime, deetsCurrent()),
},
backups: []backupRes{
{
bup: backupWithTime(baseTime, bupCurrent),
bup: backupWithTime(baseTime, bupCurrent()),
err: data.ErrNotFound,
},
},

View File

@ -25,6 +25,7 @@ import (
"github.com/alcionai/corso/src/internal/data"
dataMock "github.com/alcionai/corso/src/internal/data/mock"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
m365Mock "github.com/alcionai/corso/src/internal/m365/mock"
exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/backup/details"
@ -1128,10 +1129,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
streams = append(streams, ms)
}
mc := &mockBackupCollection{
path: storePath,
loc: locPath,
streams: streams,
mc := &m365Mock.BackupCollection{
Path: storePath,
Loc: locPath,
Streams: streams,
}
return []data.BackupCollection{mc}
@ -1155,11 +1156,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
ItemInfo: details.ItemInfo{OneDrive: &info},
}
mc := &mockBackupCollection{
path: storePath,
loc: locPath,
streams: []data.Item{ms},
state: data.NotMovedState,
mc := &m365Mock.BackupCollection{
Path: storePath,
Loc: locPath,
Streams: []data.Item{ms},
CState: data.NotMovedState,
}
return []data.BackupCollection{mc}
@ -1293,48 +1294,6 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
testForFiles(t, ctx, expected, result)
}
// TODO(pandeyabs): Switch to m365/mock/BackupCollection.
type mockBackupCollection struct {
path path.Path
loc *path.Builder
streams []data.Item
state data.CollectionState
}
func (c *mockBackupCollection) Items(context.Context, *fault.Bus) <-chan data.Item {
res := make(chan data.Item)
go func() {
defer close(res)
for _, s := range c.streams {
res <- s
}
}()
return res
}
func (c mockBackupCollection) FullPath() path.Path {
return c.path
}
func (c mockBackupCollection) PreviousPath() path.Path {
return c.path
}
func (c mockBackupCollection) LocationPath() *path.Builder {
return c.loc
}
func (c mockBackupCollection) State() data.CollectionState {
return c.state
}
func (c mockBackupCollection) DoNotMergeItems() bool {
return false
}
func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
t := suite.T()
@ -1343,10 +1302,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
collections := []data.BackupCollection{
&mockBackupCollection{
path: suite.storePath1,
loc: loc1,
streams: []data.Item{
&m365Mock.BackupCollection{
Path: suite.storePath1,
Loc: loc1,
Streams: []data.Item{
&dataMock.Item{
ItemID: testFileName,
Reader: io.NopCloser(bytes.NewReader(testFileData)),
@ -1359,10 +1318,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
},
},
},
&mockBackupCollection{
path: suite.storePath2,
loc: loc2,
streams: []data.Item{
&m365Mock.BackupCollection{
Path: suite.storePath2,
Loc: loc2,
Streams: []data.Item{
&dataMock.Item{
ItemID: testFileName3,
Reader: io.NopCloser(bytes.NewReader(testFileData3)),
@ -1603,11 +1562,11 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
for _, parent := range []path.Path{suite.testPath1, suite.testPath2} {
loc := path.Builder{}.Append(parent.Folders()...)
collection := &mockBackupCollection{path: parent, loc: loc}
collection := &m365Mock.BackupCollection{Path: parent, Loc: loc}
for _, item := range suite.files[parent.String()] {
collection.streams = append(
collection.streams,
collection.Streams = append(
collection.Streams,
&dataMock.Item{
ItemID: item.itemPath.Item(),
Reader: io.NopCloser(bytes.NewReader(item.data)),

View File

@ -156,6 +156,17 @@ func (ctrl *Controller) IsBackupRunnable(
service path.ServiceType,
resourceOwner string,
) (bool, error) {
if service == path.GroupsService {
_, err := ctrl.AC.Groups().GetByID(ctx, resourceOwner)
if err != nil {
// TODO(meain): check for error message in case groups are
// not enabled at all similar to sharepoint
return false, err
}
return true, nil
}
if service == path.SharePointService {
_, err := ctrl.AC.Sites().GetRoot(ctx)
if err != nil {
@ -181,7 +192,7 @@ func (ctrl *Controller) IsBackupRunnable(
return true, nil
}
func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error {
func verifyBackupInputs(sels selectors.Selector, cachedIDs []string) error {
var ids []string
switch sels.Service {
@ -189,16 +200,13 @@ func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error {
// Exchange and OneDrive user existence now checked in checkServiceEnabled.
return nil
case selectors.ServiceGroups:
// TODO(meain): check for group existence.
return nil
case selectors.ServiceSharePoint:
ids = siteIDs
case selectors.ServiceSharePoint, selectors.ServiceGroups:
ids = cachedIDs
}
if !filters.Contains(ids).Compare(sels.ID()) {
return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_protected_resource", sels.DiscreteOwner)
return clues.Stack(graph.ErrResourceOwnerNotFound).
With("selector_protected_resource", sels.DiscreteOwner)
}
return nil

View File

@ -100,6 +100,7 @@ func (h groupBackupHandler) NewLocationIDer(
driveID string,
elems ...string,
) details.LocationIDer {
// TODO(meain): path fixes
return details.NewSharePointLocationIDer(driveID, elems...)
}
@ -124,7 +125,6 @@ func (h groupBackupHandler) IsAllPass() bool {
func (h groupBackupHandler) IncludesDir(dir string) bool {
// TODO(meain)
// return h.scope.Matches(selectors.SharePointGroupFolder, dir)
return true
}
@ -138,7 +138,7 @@ func augmentGroupItemInfo(
size int64,
parentPath *path.Builder,
) details.ItemInfo {
var driveName, driveID, creatorEmail string
var driveName, driveID, creatorEmail, siteID, weburl string
// TODO: we rely on this info for details/restore lookups,
// so if it's nil we have an issue, and will need an alternative
@ -159,15 +159,15 @@ func augmentGroupItemInfo(
}
}
// gsi := item.GetSharepointIds()
// if gsi != nil {
// siteID = ptr.Val(gsi.GetSiteId())
// weburl = ptr.Val(gsi.GetSiteUrl())
gsi := item.GetSharepointIds()
if gsi != nil {
siteID = ptr.Val(gsi.GetSiteId())
weburl = ptr.Val(gsi.GetSiteUrl())
// if len(weburl) == 0 {
// weburl = constructWebURL(item.GetAdditionalData())
// }
// }
if len(weburl) == 0 {
weburl = constructWebURL(item.GetAdditionalData())
}
}
if item.GetParentReference() != nil {
driveID = ptr.Val(item.GetParentReference().GetDriveId())
@ -179,6 +179,7 @@ func augmentGroupItemInfo(
pps = parentPath.String()
}
// TODO: Add channel name and ID
dii.Groups = &details.GroupsInfo{
Created: ptr.Val(item.GetCreatedDateTime()),
DriveID: driveID,
@ -189,6 +190,8 @@ func augmentGroupItemInfo(
Owner: creatorEmail,
ParentPath: pps,
Size: size,
SiteID: siteID,
WebURL: weburl,
}
dii.Extension = &details.ExtensionData{}

View File

@ -11,24 +11,48 @@ import (
// NewGroupsLocationIDer builds a LocationIDer for the groups.
func NewGroupsLocationIDer(
category path.CategoryType,
driveID string,
escapedFolders ...string,
) uniqueLoc {
// TODO: implement
return uniqueLoc{}
) (uniqueLoc, error) {
// TODO(meain): path fixes
if err := path.ValidateServiceAndCategory(path.GroupsService, category); err != nil {
return uniqueLoc{}, clues.Wrap(err, "making groups LocationIDer")
}
pb := path.Builder{}.Append(category.String())
prefixElems := 1
if driveID != "" { // non sp paths don't have driveID
pb.Append(driveID)
prefixElems = 2
}
pb.Append(escapedFolders...)
return uniqueLoc{pb, prefixElems}, nil
}
// GroupsInfo describes a groups item
type GroupsInfo struct {
Created time.Time `json:"created,omitempty"`
DriveName string `json:"driveName,omitempty"`
DriveID string `json:"driveID,omitempty"`
ItemName string `json:"itemName,omitempty"`
ItemType ItemType `json:"itemType,omitempty"`
Modified time.Time `json:"modified,omitempty"`
Owner string `json:"owner,omitempty"`
ParentPath string `json:"parentPath,omitempty"`
Size int64 `json:"size,omitempty"`
// Channels Specific
ChannelName string `json:"channelName,omitempty"`
ChannelID string `json:"channelID,omitempty"`
// SharePoint specific
DriveName string `json:"driveName,omitempty"`
DriveID string `json:"driveID,omitempty"`
SiteID string `json:"siteID,omitempty"`
WebURL string `json:"webURL,omitempty"`
}
// Headers returns the human-readable names of properties in a SharePointInfo
@ -51,9 +75,27 @@ func (i *GroupsInfo) UpdateParentPath(newLocPath *path.Builder) {
}
func (i *GroupsInfo) uniqueLocation(baseLoc *path.Builder) (*uniqueLoc, error) {
return nil, clues.New("not yet implemented")
var category path.CategoryType
switch i.ItemType {
case SharePointLibrary:
category = path.LibrariesCategory
if len(i.DriveID) == 0 {
return nil, clues.New("empty drive ID")
}
}
loc, err := NewGroupsLocationIDer(category, i.DriveID, baseLoc.Elements()...)
return &loc, err
}
func (i *GroupsInfo) updateFolder(f *FolderInfo) error {
return clues.New("not yet implemented")
// TODO(meain): path updates if any
if i.ItemType == SharePointLibrary {
return updateFolderWithinDrive(SharePointLibrary, i.DriveName, i.DriveID, f)
}
return clues.New("unsupported ItemType for GroupsInfo").With("item_type", i.ItemType)
}

View File

@ -28,7 +28,7 @@ const (
ExchangeMail ItemType = 3
// SharePoint (10x)
SharePointLibrary ItemType = 101
SharePointLibrary ItemType = 101 // also used for groups
SharePointList ItemType = 102
SharePointPage ItemType = 103

View File

@ -6,6 +6,7 @@ import (
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/fault"
@ -80,7 +81,7 @@ func getAllGroups(
// helpers
// ---------------------------------------------------------------------------
// parseUser extracts information from `models.Groupable` we care about
// parseGroup extracts information from `models.Groupable` we care about
func parseGroup(ctx context.Context, mg models.Groupable) (*Group, error) {
if mg.GetDisplayName() == nil {
return nil, clues.New("group missing display name").
@ -95,3 +96,23 @@ func parseGroup(ctx context.Context, mg models.Groupable) (*Group, error) {
return u, nil
}
// GroupsMap retrieves an id-name cache of all groups in the tenant.
func GroupsMap(
	ctx context.Context,
	acct account.Account,
	errs *fault.Bus,
) (idname.Cacher, error) {
	gs, err := Groups(ctx, acct, errs)
	if err != nil {
		return idname.NewCache(nil), err
	}

	// Index display names by group ID for cache construction.
	idToName := make(map[string]string, len(gs))

	for _, g := range gs {
		idToName[g.ID] = g.DisplayName
	}

	return idname.NewCache(idToName), nil
}

View File

@ -68,6 +68,31 @@ func (suite *GroupsIntgSuite) TestGroups() {
}
}
// TestGroupsMap verifies GroupsMap returns a non-empty id-name cache and
// that every cached id resolves to a non-empty display name.
func (suite *GroupsIntgSuite) TestGroupsMap() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	// presumably bounds concurrent graph API calls for this run — see
	// graph.InitializeConcurrencyLimiter
	graph.InitializeConcurrencyLimiter(ctx, true, 4)

	gm, err := m365.GroupsMap(ctx, suite.acct, fault.New(true))
	assert.NoError(t, err, clues.ToCore(err))
	assert.NotEmpty(t, gm)

	// one subtest per discovered group id
	for _, gid := range gm.IDs() {
		suite.Run("group_"+gid, func() {
			t := suite.T()

			assert.NotEmpty(t, gid)

			name, ok := gm.NameOf(gid)
			assert.True(t, ok)
			assert.NotEmpty(t, name)
		})
	}
}
func (suite *GroupsIntgSuite) TestGroups_InvalidCredentials() {
table := []struct {
name string