diff --git a/src/internal/kopia/backup_bases.go b/src/internal/kopia/backup_bases.go
index 6e5d412d7..f18515d8f 100644
--- a/src/internal/kopia/backup_bases.go
+++ b/src/internal/kopia/backup_bases.go
@@ -46,6 +46,9 @@ type BackupBases interface {
 	// MinBackupVersion returns the lowest version of all merge backups in the
 	// BackupBases.
 	MinBackupVersion() int
+	// MinAssistVersion returns the lowest version of all assist backups in the
+	// BackupBases.
+	MinAssistVersion() int
 	// MergeBackupBases takes another BackupBases and merges it's contained assist
 	// and merge bases into this BackupBases. The passed in BackupBases is
 	// considered an older alternative to this BackupBases meaning bases from
@@ -119,6 +122,22 @@ func (bb *backupBases) MinBackupVersion() int {
 	return min
 }
 
+func (bb *backupBases) MinAssistVersion() int {
+	min := version.NoBackup
+
+	if bb == nil {
+		return min
+	}
+
+	for _, base := range bb.assistBases {
+		if min == version.NoBackup || base.Backup.Version < min {
+			min = base.Backup.Version
+		}
+	}
+
+	return min
+}
+
 func (bb backupBases) MergeBases() []BackupBase {
 	return slices.Clone(bb.mergeBases)
 }
diff --git a/src/internal/kopia/backup_bases_test.go b/src/internal/kopia/backup_bases_test.go
index d2af11a7d..f6dea79b4 100644
--- a/src/internal/kopia/backup_bases_test.go
+++ b/src/internal/kopia/backup_bases_test.go
@@ -92,20 +92,23 @@ func TestBackupBasesUnitSuite(t *testing.T) {
 	suite.Run(t, &BackupBasesUnitSuite{Suite: tester.NewUnitSuite(t)})
 }
 
-func (suite *BackupBasesUnitSuite) TestMinBackupVersion() {
+func (suite *BackupBasesUnitSuite) TestBackupBases_minVersions() {
 	table := []struct {
-		name            string
-		bb              *backupBases
-		expectedVersion int
+		name                  string
+		bb                    *backupBases
+		expectedBackupVersion int
+		expectedAssistVersion int
 	}{
 		{
-			name:            "Nil BackupBase",
-			expectedVersion: version.NoBackup,
+			name:                  "Nil BackupBase",
+			expectedBackupVersion: version.NoBackup,
+			expectedAssistVersion: version.NoBackup,
 		},
 		{
-			name:            "No Backups",
-			bb:              &backupBases{},
-			expectedVersion: version.NoBackup,
+			name:                  "No Backups",
+			bb:                    &backupBases{},
+			expectedBackupVersion: version.NoBackup,
+			expectedAssistVersion: version.NoBackup,
 		},
 		{
 			name: "Unsorted Backups",
@@ -128,7 +131,8 @@ func (suite *BackupBasesUnitSuite) TestMinBackupVersion() {
 					},
 				},
 			},
-			expectedVersion: 0,
+			expectedBackupVersion: 0,
+			expectedAssistVersion: version.NoBackup,
 		},
 		{
 			name: "Only Assist Bases",
@@ -151,12 +155,97 @@ func (suite *BackupBasesUnitSuite) TestMinBackupVersion() {
 					},
 				},
 			},
-			expectedVersion: version.NoBackup,
+			expectedBackupVersion: version.NoBackup,
+			expectedAssistVersion: 0,
+		},
+		{
+			name: "Assist and Merge Bases, min merge",
+			bb: &backupBases{
+				mergeBases: []BackupBase{
+					{
+						Backup: &backup.Backup{
+							Version: 1,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 5,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 3,
+						},
+					},
+				},
+				assistBases: []BackupBase{
+					{
+						Backup: &backup.Backup{
+							Version: 4,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 2,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 6,
+						},
+					},
+				},
+			},
+			expectedBackupVersion: 1,
+			expectedAssistVersion: 2,
+		},
+		{
+			name: "Assist and Merge Bases, min assist",
+			bb: &backupBases{
+				mergeBases: []BackupBase{
+					{
+						Backup: &backup.Backup{
+							Version: 7,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 5,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 3,
+						},
+					},
+				},
+				assistBases: []BackupBase{
+					{
+						Backup: &backup.Backup{
+							Version: 4,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 2,
+						},
+					},
+					{
+						Backup: &backup.Backup{
+							Version: 6,
+						},
+					},
+				},
+			},
+			expectedBackupVersion: 3,
+			expectedAssistVersion: 2,
 		},
 	}
 
 	for _, test := range table {
 		suite.Run(test.name, func() {
-			assert.Equal(suite.T(), test.expectedVersion, test.bb.MinBackupVersion())
+			t := suite.T()
+			assert.Equal(t, test.expectedBackupVersion, test.bb.MinBackupVersion(), "backup")
+			assert.Equal(t, test.expectedAssistVersion, test.bb.MinAssistVersion(), "assist")
 		})
 	}
 }
diff --git a/src/internal/m365/onedrive_test.go b/src/internal/m365/onedrive_test.go
index 6617f1bfa..45ab69521 100644
--- a/src/internal/m365/onedrive_test.go
+++ b/src/internal/m365/onedrive_test.go
@@ -228,7 +228,7 @@ func (suite *SharePointIntegrationSuite) TestLinkSharesInheritanceRestoreAndBack
 
 func (suite *SharePointIntegrationSuite) TestRestoreFolderNamedFolderRegression() {
 	// No reason why it couldn't work with previous versions, but this is when it got introduced.
-	testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID)
+	testRestoreFolderNamedFolderRegression(suite, version.Backup)
 }
 
 // ---------------------------------------------------------------------------
@@ -292,7 +292,7 @@ func (suite *OneDriveIntegrationSuite) TestLinkSharesInheritanceRestoreAndBackup
 
 func (suite *OneDriveIntegrationSuite) TestRestoreFolderNamedFolderRegression() {
 	// No reason why it couldn't work with previous versions, but this is when it got introduced.
-	testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID)
+	testRestoreFolderNamedFolderRegression(suite, version.Backup)
 }
 
 // ---------------------------------------------------------------------------
diff --git a/src/internal/m365/service/onedrive/restore_test.go b/src/internal/m365/service/onedrive/restore_test.go
index 0af13eccb..494a22709 100644
--- a/src/internal/m365/service/onedrive/restore_test.go
+++ b/src/internal/m365/service/onedrive/restore_test.go
@@ -24,7 +24,7 @@ func TestRestoreUnitSuite(t *testing.T) {
 func (suite *RestoreUnitSuite) TestAugmentRestorePaths() {
 	// Adding a simple test here so that we can be sure that this
 	// function gets updated whenever we add a new version.
-	require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version")
+	require.LessOrEqual(suite.T(), version.Backup, version.Groups9Update, "unsupported backup version")
 
 	table := []struct {
 		name string
@@ -216,7 +216,7 @@ func (suite *RestoreUnitSuite) TestAugmentRestorePaths() {
 func (suite *RestoreUnitSuite) TestAugmentRestorePaths_DifferentRestorePath() {
 	// Adding a simple test here so that we can be sure that this
 	// function gets updated whenever we add a new version.
-	require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version")
+	require.LessOrEqual(suite.T(), version.Backup, version.Groups9Update, "unsupported backup version")
 
 	type pathPair struct {
 		storage string
diff --git a/src/internal/m365/service/onedrive/stub/stub.go b/src/internal/m365/service/onedrive/stub/stub.go
index 779cc7242..e1df35c13 100644
--- a/src/internal/m365/service/onedrive/stub/stub.go
+++ b/src/internal/m365/service/onedrive/stub/stub.go
@@ -217,7 +217,8 @@ func (c *collection) withFile(name string, fileData []byte, meta MetaData) (*col
 		c.Aux = append(c.Aux, md)
 
 	// v6+ current metadata design
-	case version.OneDrive6NameInMeta, version.OneDrive7LocationRef, version.All8MigrateUserPNToID:
+	case version.OneDrive6NameInMeta, version.OneDrive7LocationRef,
+		version.All8MigrateUserPNToID, version.Groups9Update:
 		item, err := FileWithData(
 			name+metadata.DataFileSuffix,
 			name+metadata.DataFileSuffix,
@@ -251,7 +252,8 @@ func (c *collection) withFile(name string, fileData []byte, meta MetaData) (*col
 func (c *collection) withFolder(name string, meta MetaData) (*collection, error) {
 	switch c.BackupVersion {
 	case 0, version.OneDrive4DirIncludesPermissions, version.OneDrive5DirMetaNoName,
-		version.OneDrive6NameInMeta, version.OneDrive7LocationRef, version.All8MigrateUserPNToID:
+		version.OneDrive6NameInMeta, version.OneDrive7LocationRef,
+		version.All8MigrateUserPNToID, version.Groups9Update:
 		return c, nil
 
 	case version.OneDrive1DataAndMetaFiles, 2, version.OneDrive3IsMetaMarker:
diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go
index 1185bbec1..32bdbb236 100644
--- a/src/internal/operations/backup.go
+++ b/src/internal/operations/backup.go
@@ -371,6 +371,34 @@ func (op *BackupOperation) do(
 		return nil, clues.Wrap(err, "producing manifests and metadata")
 	}
 
+	// Force full backups if the base is an older corso version. Those backups
+	// don't have all the data we want to pull forward.
+	//
+	// TODO(ashmrtn): We can push this check further down the stack to either:
+	//   * the metadata fetch code to disable individual bases (requires a
+	//     function to completely remove a base from the set)
+	//   * the base finder code to skip over older bases (breaks isolation a bit
+	//     by requiring knowledge of good/bad backup versions for different
+	//     services)
+	if op.Selectors.PathService() == path.GroupsService {
+		if mans.MinBackupVersion() != version.NoBackup &&
+			mans.MinBackupVersion() < version.Groups9Update {
+			logger.Ctx(ctx).Info("dropping merge bases due to groups version change")
+
+			mans.DisableMergeBases()
+			mans.DisableAssistBases()
+
+			canUseMetadata = false
+			mdColls = nil
+		}
+
+		if mans.MinAssistVersion() != version.NoBackup &&
+			mans.MinAssistVersion() < version.Groups9Update {
+			logger.Ctx(ctx).Info("disabling assist bases due to groups version change")
+			mans.DisableAssistBases()
+		}
+	}
+
 	ctx = clues.Add(
 		ctx,
 		"can_use_metadata", canUseMetadata,
diff --git a/src/internal/operations/pathtransformer/restore_path_transformer_test.go b/src/internal/operations/pathtransformer/restore_path_transformer_test.go
index a3a9a4b1c..5dc637e15 100644
--- a/src/internal/operations/pathtransformer/restore_path_transformer_test.go
+++ b/src/internal/operations/pathtransformer/restore_path_transformer_test.go
@@ -61,6 +61,40 @@ func (suite *RestorePathTransformerUnitSuite) TestGetPaths() {
 		expectErr     assert.ErrorAssertionFunc
 		expected      []expectPaths
 	}{
+		{
+			name: "Groups List Errors v9",
+			// No version bump for the change so we always have to check for this.
+			backupVersion: version.Groups9Update,
+			input: []*details.Entry{
+				{
+					RepoRef:     GroupsRootItemPath.RR.String(),
+					LocationRef: GroupsRootItemPath.Loc.String(),
+					ItemInfo: details.ItemInfo{
+						Groups: &details.GroupsInfo{
+							ItemType: details.SharePointList,
+						},
+					},
+				},
+			},
+			expectErr: assert.Error,
+		},
+		{
+			name: "Groups Page Errors v9",
+			// No version bump for the change so we always have to check for this.
+			backupVersion: version.Groups9Update,
+			input: []*details.Entry{
+				{
+					RepoRef:     GroupsRootItemPath.RR.String(),
+					LocationRef: GroupsRootItemPath.Loc.String(),
+					ItemInfo: details.ItemInfo{
+						Groups: &details.GroupsInfo{
+							ItemType: details.SharePointPage,
+						},
+					},
+				},
+			},
+			expectErr: assert.Error,
+		},
 		{
 			name: "Groups List Errors",
 			// No version bump for the change so we always have to check for this.
diff --git a/src/internal/operations/test/exchange_test.go b/src/internal/operations/test/exchange_test.go
index 1947eb4bf..7439c04ae 100644
--- a/src/internal/operations/test/exchange_test.go
+++ b/src/internal/operations/test/exchange_test.go
@@ -231,6 +231,113 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
 	}
 }
 
+func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchangeBasic_groups9VersionBump() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	var (
+		mb   = evmock.NewBus()
+		sel  = selectors.NewExchangeBackup([]string{suite.its.user.ID})
+		opts = control.DefaultOptions()
+		ws   = deeTD.DriveIDFromRepoRef
+	)
+
+	sel.Include(
+		sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()),
+		// sel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()),
+		sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
+
+	bo, bod := prepNewTestBackupOp(
+		t,
+		ctx,
+		mb,
+		sel.Selector,
+		opts,
+		version.All8MigrateUserPNToID)
+	defer bod.close(t, ctx)
+
+	runAndCheckBackup(t, ctx, &bo, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&bo,
+		bod.sel,
+		bod.sel.ID(),
+		path.EmailCategory)
+
+	_, expectDeets := deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.ExchangeService,
+		ws,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		ws,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	mb = evmock.NewBus()
+	notForcedFull := newTestBackupOp(
+		t,
+		ctx,
+		bod,
+		mb,
+		opts)
+	notForcedFull.BackupVersion = version.Groups9Update
+
+	runAndCheckBackup(t, ctx, &notForcedFull, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&notForcedFull,
+		bod.sel,
+		bod.sel.ID(),
+		path.EmailCategory)
+
+	_, expectDeets = deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		notForcedFull.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.ExchangeService,
+		ws,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		notForcedFull.Results.BackupID,
+		ws,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	// The number of items backed up in the second backup should be less than the
+	// number of items in the original backup.
+	assert.Greater(
+		t,
+		bo.Results.Counts[string(count.PersistedNonCachedFiles)],
+		notForcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
+		"items written")
+}
+
 func (suite *ExchangeBackupIntgSuite) TestBackup_Run_incrementalExchange() {
 	testExchangeContinuousBackups(suite, control.Toggles{})
 }
diff --git a/src/internal/operations/test/group_test.go b/src/internal/operations/test/group_test.go
index f9a40f875..5c1b1dcd5 100644
--- a/src/internal/operations/test/group_test.go
+++ b/src/internal/operations/test/group_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/suite"
 
 	evmock "github.com/alcionai/corso/src/internal/events/mock"
@@ -13,6 +14,7 @@ import (
 	"github.com/alcionai/corso/src/internal/version"
 	deeTD "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/count"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
@@ -78,6 +80,121 @@ func (suite *GroupsBackupIntgSuite) TestBackup_Run_incrementalGroups() {
 		true)
 }
 
+func (suite *GroupsBackupIntgSuite) TestBackup_Run_groupsBasic_groups9VersionBump() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	var (
+		mb      = evmock.NewBus()
+		sel     = selectors.NewGroupsBackup([]string{suite.its.group.ID})
+		opts    = control.DefaultOptions()
+		whatSet = deeTD.CategoryFromRepoRef
+	)
+
+	sel.Include(
+		selTD.GroupsBackupLibraryFolderScope(sel),
+		selTD.GroupsBackupChannelScope(sel))
+
+	bo, bod := prepNewTestBackupOp(
+		t,
+		ctx,
+		mb,
+		sel.Selector,
+		opts,
+		version.All8MigrateUserPNToID)
+	defer bod.close(t, ctx)
+
+	runAndCheckBackup(t, ctx, &bo, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&bo,
+		bod.sel,
+		bod.sel.ID(),
+		path.ChannelMessagesCategory)
+
+	_, expectDeets := deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.GroupsService,
+		whatSet,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		whatSet,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	mb = evmock.NewBus()
+	forcedFull := newTestBackupOp(
+		t,
+		ctx,
+		bod,
+		mb,
+		opts)
+	forcedFull.BackupVersion = version.Groups9Update
+
+	runAndCheckBackup(t, ctx, &forcedFull, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&forcedFull,
+		bod.sel,
+		bod.sel.ID(),
+		path.ChannelMessagesCategory)
+
+	_, expectDeets = deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		forcedFull.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.GroupsService,
+		whatSet,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		forcedFull.Results.BackupID,
+		whatSet,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	// The number of items backed up in the forced full backup should be roughly
+	// the same as the number of items in the original backup.
+	assert.Equal(
+		t,
+		bo.Results.Counts[string(count.PersistedNonCachedFiles)],
+		forcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
+		"items written")
+}
+
+func (suite *GroupsBackupIntgSuite) TestBackup_Run_groupsVersion9AssistBases() {
+	sel := selectors.NewGroupsBackup([]string{suite.its.group.ID})
+	sel.Include(
+		selTD.GroupsBackupLibraryFolderScope(sel),
+		selTD.GroupsBackupChannelScope(sel))
+
+	runDriveAssistBaseGroupsUpdate(suite, sel.Selector, false)
+}
+
 func (suite *GroupsBackupIntgSuite) TestBackup_Run_groupsBasic() {
 	t := suite.T()
 
diff --git a/src/internal/operations/test/helper_test.go b/src/internal/operations/test/helper_test.go
index 6834ce620..a88319523 100644
--- a/src/internal/operations/test/helper_test.go
+++ b/src/internal/operations/test/helper_test.go
@@ -169,6 +169,7 @@ func prepNewTestBackupOp(
 		bod,
 		bus,
 		opts)
+	bo.BackupVersion = backupVersion
 
 	bod.sss = streamstore.NewStreamer(
 		bod.kw,
diff --git a/src/internal/operations/test/onedrive_test.go b/src/internal/operations/test/onedrive_test.go
index 808c4675c..508d3313d 100644
--- a/src/internal/operations/test/onedrive_test.go
+++ b/src/internal/operations/test/onedrive_test.go
@@ -3,6 +3,8 @@ package test_test
 import (
 	"context"
 	"fmt"
+	"io"
+	"sync/atomic"
 	"testing"
 
 	"github.com/alcionai/clues"
@@ -36,6 +38,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/control"
 	ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata"
 	"github.com/alcionai/corso/src/pkg/count"
+	"github.com/alcionai/corso/src/pkg/extensions"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
@@ -107,6 +110,118 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
 		false)
 }
 
+func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveBasic_groups9VersionBump() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	var (
+		mb     = evmock.NewBus()
+		userID = tconfig.SecondaryM365UserID(t)
+		osel   = selectors.NewOneDriveBackup([]string{userID})
+		ws     = deeTD.DriveIDFromRepoRef
+		opts   = control.DefaultOptions()
+	)
+
+	osel.Include(selTD.OneDriveBackupFolderScope(osel))
+
+	bo, bod := prepNewTestBackupOp(
+		t,
+		ctx,
+		mb,
+		osel.Selector,
+		opts,
+		version.All8MigrateUserPNToID)
+	defer bod.close(t, ctx)
+
+	runAndCheckBackup(t, ctx, &bo, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&bo,
+		bod.sel,
+		bod.sel.ID(),
+		path.FilesCategory)
+
+	_, expectDeets := deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.OneDriveService,
+		ws,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		ws,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	mb = evmock.NewBus()
+	notForcedFull := newTestBackupOp(
+		t,
+		ctx,
+		bod,
+		mb,
+		opts)
+	notForcedFull.BackupVersion = version.Groups9Update
+
+	runAndCheckBackup(t, ctx, &notForcedFull, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&notForcedFull,
+		bod.sel,
+		bod.sel.ID(),
+		path.FilesCategory)
+
+	_, expectDeets = deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		notForcedFull.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.OneDriveService,
+		ws,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		notForcedFull.Results.BackupID,
+		ws,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	// The number of items backed up in the second backup should be less than the
+	// number of items in the original backup.
+	assert.Greater(
+		t,
+		bo.Results.Counts[string(count.PersistedNonCachedFiles)],
+		notForcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
+		"items written")
+}
+
+//func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveVersion9AssistBases() {
+//	sel := selectors.NewOneDriveBackup([]string{tconfig.SecondaryM365UserID(suite.T())})
+//	sel.Include(selTD.OneDriveBackupFolderScope(sel))
+//
+//	runDriveAssistBaseGroupsUpdate(suite, sel.Selector, true)
+//}
+
 func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
 	sel := selectors.NewOneDriveRestore([]string{suite.its.user.ID})
 
@@ -806,6 +921,179 @@ func runDriveIncrementalTest(
 	}
 }
 
+var (
+	_ io.ReadCloser                    = &failFirstRead{}
+	_ extensions.CreateItemExtensioner = &createFailFirstRead{}
+)
+
+// failFirstRead fails the first read on a file being uploaded during a
+// snapshot. Only one file is failed during the snapshot even if the snapshot
+// contains multiple files.
+type failFirstRead struct {
+	firstFile *atomic.Bool
+	io.ReadCloser
+}
+
+func (e *failFirstRead) Read(p []byte) (int, error) {
+	if e.firstFile.CompareAndSwap(true, false) {
+		// This is the first file being read, return an error for it.
+		return 0, clues.New("injected error for testing")
+	}
+
+	return e.ReadCloser.Read(p)
+}
+
+func newCreateSingleFileFailExtension() *createFailFirstRead {
+	firstItem := &atomic.Bool{}
+	firstItem.Store(true)
+
+	return &createFailFirstRead{
+		firstItem: firstItem,
+	}
+}
+
+type createFailFirstRead struct {
+	firstItem *atomic.Bool
+}
+
+func (ce *createFailFirstRead) CreateItemExtension(
+	_ context.Context,
+	r io.ReadCloser,
+	_ details.ItemInfo,
+	_ *details.ExtensionData,
+) (io.ReadCloser, error) {
+	return &failFirstRead{
+		firstFile:  ce.firstItem,
+		ReadCloser: r,
+	}, nil
+}
+
+func runDriveAssistBaseGroupsUpdate(
+	suite tester.Suite,
+	sel selectors.Selector,
+	expectCached bool,
+) {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	var (
+		whatSet = deeTD.CategoryFromRepoRef
+		mb      = evmock.NewBus()
+		opts    = control.DefaultOptions()
+	)
+
+	opts.ItemExtensionFactory = []extensions.CreateItemExtensioner{
+		newCreateSingleFileFailExtension(),
+	}
+
+	// Creating out here so bod lasts for the full test and isn't closed until
+	// the test is completely done.
+	bo, bod := prepNewTestBackupOp(
+		t,
+		ctx,
+		mb,
+		sel,
+		opts,
+		version.All8MigrateUserPNToID)
+	defer bod.close(t, ctx)
+
+	suite.Run("makeAssistBackup", func() {
+		t := suite.T()
+
+		ctx, flush := tester.NewContext(t)
+		defer flush()
+
+		// Need to run manually because runAndCheckBackup assumes success for the
+		// most part.
+		err := bo.Run(ctx)
+		assert.Error(t, err, clues.ToCore(err))
+		assert.NotEmpty(t, bo.Results, "backup had non-zero results")
+		assert.NotEmpty(t, bo.Results.BackupID, "backup generated an ID")
+		assert.NotZero(t, bo.Results.ItemsWritten)
+
+		// TODO(ashmrtn): Check that the base is marked as an assist base.
+		t.Logf("base error: %v\n", err)
+	})
+
+	// Don't run the below if we've already failed since it won't make sense
+	// anymore.
+	if suite.T().Failed() {
+		return
+	}
+
+	suite.Run("makeIncrementalBackup", func() {
+		t := suite.T()
+
+		ctx, flush := tester.NewContext(t)
+		defer flush()
+
+		var (
+			mb   = evmock.NewBus()
+			opts = control.DefaultOptions()
+		)
+
+		forcedFull := newTestBackupOp(
+			t,
+			ctx,
+			bod,
+			mb,
+			opts)
+		forcedFull.BackupVersion = version.Groups9Update
+
+		runAndCheckBackup(t, ctx, &forcedFull, mb, false)
+
+		reasons, err := bod.sel.Reasons(bod.acct.ID(), false)
+		require.NoError(t, err, clues.ToCore(err))
+
+		for _, reason := range reasons {
+			checkBackupIsInManifests(
+				t,
+				ctx,
+				bod.kw,
+				bod.sw,
+				&forcedFull,
+				bod.sel,
+				bod.sel.ID(),
+				reason.Category())
+		}
+
+		_, expectDeets := deeTD.GetDeetsInBackup(
+			t,
+			ctx,
+			forcedFull.Results.BackupID,
+			bod.acct.ID(),
+			bod.sel.ID(),
+			bod.sel.PathService(),
+			whatSet,
+			bod.kms,
+			bod.sss)
+		deeTD.CheckBackupDetails(
+			t,
+			ctx,
+			forcedFull.Results.BackupID,
+			whatSet,
+			bod.kms,
+			bod.sss,
+			expectDeets,
+			false)
+
+		// For groups the forced full backup shouldn't have any cached items. For
+		// OneDrive and SharePoint it should since they shouldn't be forcing full
+		// backups.
+		cachedCheck := assert.NotZero
+		if !expectCached {
+			cachedCheck = assert.Zero
+		}
+
+		cachedCheck(
+			t,
+			forcedFull.Results.Counts[string(count.PersistedCachedFiles)],
+			"kopia cached items")
+	})
+}
+
 func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
 	t := suite.T()
 
diff --git a/src/internal/operations/test/sharepoint_test.go b/src/internal/operations/test/sharepoint_test.go
index fa28bb747..28306cd57 100644
--- a/src/internal/operations/test/sharepoint_test.go
+++ b/src/internal/operations/test/sharepoint_test.go
@@ -46,6 +46,117 @@ func (suite *SharePointBackupIntgSuite) SetupSuite() {
 	suite.its = newIntegrationTesterSetup(suite.T())
 }
 
+func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointBasic_groups9VersionBump() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	var (
+		mb   = evmock.NewBus()
+		sel  = selectors.NewSharePointBackup([]string{suite.its.site.ID})
+		opts = control.DefaultOptions()
+		ws   = deeTD.DriveIDFromRepoRef
+	)
+
+	sel.Include(selTD.SharePointBackupFolderScope(sel))
+
+	bo, bod := prepNewTestBackupOp(
+		t,
+		ctx,
+		mb,
+		sel.Selector,
+		opts,
+		version.All8MigrateUserPNToID)
+	defer bod.close(t, ctx)
+
+	runAndCheckBackup(t, ctx, &bo, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&bo,
+		bod.sel,
+		bod.sel.ID(),
+		path.LibrariesCategory)
+
+	_, expectDeets := deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.SharePointService,
+		ws,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		bo.Results.BackupID,
+		ws,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	mb = evmock.NewBus()
+	notForcedFull := newTestBackupOp(
+		t,
+		ctx,
+		bod,
+		mb,
+		opts)
+	notForcedFull.BackupVersion = version.Groups9Update
+
+	runAndCheckBackup(t, ctx, &notForcedFull, mb, false)
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod.kw,
+		bod.sw,
+		&notForcedFull,
+		bod.sel,
+		bod.sel.ID(),
+		path.LibrariesCategory)
+
+	_, expectDeets = deeTD.GetDeetsInBackup(
+		t,
+		ctx,
+		notForcedFull.Results.BackupID,
+		bod.acct.ID(),
+		bod.sel.ID(),
+		path.SharePointService,
+		ws,
+		bod.kms,
+		bod.sss)
+	deeTD.CheckBackupDetails(
+		t,
+		ctx,
+		notForcedFull.Results.BackupID,
+		ws,
+		bod.kms,
+		bod.sss,
+		expectDeets,
+		false)
+
+	// The number of items backed up in the second backup should be less than the
+	// number of items in the original backup.
+	assert.Greater(
+		t,
+		bo.Results.Counts[string(count.PersistedNonCachedFiles)],
+		notForcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
+		"items written")
+}
+
+func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointVersion9AssistBases() {
+	sel := selectors.NewSharePointBackup([]string{suite.its.site.ID})
+	sel.Include(selTD.SharePointBackupFolderScope(sel))
+
+	runDriveAssistBaseGroupsUpdate(suite, sel.Selector, true)
+}
+
 func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
 	sel := selectors.NewSharePointRestore([]string{suite.its.site.ID})
 
diff --git a/src/internal/version/backup.go b/src/internal/version/backup.go
index 7dbcc6718..68011867d 100644
--- a/src/internal/version/backup.go
+++ b/src/internal/version/backup.go
@@ -1,6 +1,6 @@
 package version
 
-const Backup = 8
+const Backup = Groups9Update
 
 // Various labels to refer to important version changes.
 // Labels don't need 1:1 service:version representation. Add a new
@@ -46,6 +46,10 @@ const (
 	// All8MigrateUserPNToID marks when we migrated repo refs from the user's
 	// PrincipalName to their ID for stability.
 	All8MigrateUserPNToID = 8
+
+	// Groups9Update marks when we updated the details that groups and teams use.
+	// Older backups don't contain all the info we want in details.
+	Groups9Update = 9
 )
 
 // IsNoBackup returns true if the version implies that no prior backup exists.