Groups version bump (#4561)

Bump the backup version and force a full backup if
there's a backup for teams/groups that has base(s)
from an older version of corso

This will avoid propagating older details formats
forward. Those formats don't have all the data
newer formats do

This is mostly a stop-gap, a more robust solution
can be added later

Manually tested that it forces a full backup

---

#### Does this PR need a docs update or release note?

- [ ]  Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x]  No

#### Type of change

- [ ] 🌻 Feature
- [x] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* #4569

#### Test Plan

- [x] 💪 Manual
- [ ]  Unit test
- [ ] 💚 E2E
This commit is contained in:
ashmrtn 2023-10-27 12:37:39 -07:00 committed by GitHub
parent fd887f4d04
commit f61448d650
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 819 additions and 19 deletions

View File

@ -46,6 +46,9 @@ type BackupBases interface {
// MinBackupVersion returns the lowest version of all merge backups in the // MinBackupVersion returns the lowest version of all merge backups in the
// BackupBases. // BackupBases.
MinBackupVersion() int MinBackupVersion() int
// MinAssistVersion returns the lowest version of all assist backups in the
// BackupBases.
MinAssistVersion() int
// MergeBackupBases takes another BackupBases and merges it's contained assist // MergeBackupBases takes another BackupBases and merges it's contained assist
// and merge bases into this BackupBases. The passed in BackupBases is // and merge bases into this BackupBases. The passed in BackupBases is
// considered an older alternative to this BackupBases meaning bases from // considered an older alternative to this BackupBases meaning bases from
@ -119,6 +122,22 @@ func (bb *backupBases) MinBackupVersion() int {
return min return min
} }
// MinAssistVersion returns the lowest backup version found among the assist
// bases in this BackupBases. It returns version.NoBackup if the receiver is
// nil or there are no assist bases. Mirrors MinBackupVersion, which does the
// same for merge bases.
func (bb *backupBases) MinAssistVersion() int {
min := version.NoBackup
if bb == nil {
return min
}
for _, base := range bb.assistBases {
// NoBackup acts as the "unset" sentinel; the first base always wins.
if min == version.NoBackup || base.Backup.Version < min {
min = base.Backup.Version
}
}
return min
}
func (bb backupBases) MergeBases() []BackupBase { func (bb backupBases) MergeBases() []BackupBase {
return slices.Clone(bb.mergeBases) return slices.Clone(bb.mergeBases)
} }

View File

@ -92,20 +92,23 @@ func TestBackupBasesUnitSuite(t *testing.T) {
suite.Run(t, &BackupBasesUnitSuite{Suite: tester.NewUnitSuite(t)}) suite.Run(t, &BackupBasesUnitSuite{Suite: tester.NewUnitSuite(t)})
} }
func (suite *BackupBasesUnitSuite) TestMinBackupVersion() { func (suite *BackupBasesUnitSuite) TestBackupBases_minVersions() {
table := []struct { table := []struct {
name string name string
bb *backupBases bb *backupBases
expectedVersion int expectedBackupVersion int
expectedAssistVersion int
}{ }{
{ {
name: "Nil BackupBase", name: "Nil BackupBase",
expectedVersion: version.NoBackup, expectedBackupVersion: version.NoBackup,
expectedAssistVersion: version.NoBackup,
}, },
{ {
name: "No Backups", name: "No Backups",
bb: &backupBases{}, bb: &backupBases{},
expectedVersion: version.NoBackup, expectedBackupVersion: version.NoBackup,
expectedAssistVersion: version.NoBackup,
}, },
{ {
name: "Unsorted Backups", name: "Unsorted Backups",
@ -128,7 +131,8 @@ func (suite *BackupBasesUnitSuite) TestMinBackupVersion() {
}, },
}, },
}, },
expectedVersion: 0, expectedBackupVersion: 0,
expectedAssistVersion: version.NoBackup,
}, },
{ {
name: "Only Assist Bases", name: "Only Assist Bases",
@ -151,12 +155,97 @@ func (suite *BackupBasesUnitSuite) TestMinBackupVersion() {
}, },
}, },
}, },
expectedVersion: version.NoBackup, expectedBackupVersion: version.NoBackup,
expectedAssistVersion: 0,
},
{
name: "Assist and Merge Bases, min merge",
bb: &backupBases{
mergeBases: []BackupBase{
{
Backup: &backup.Backup{
Version: 1,
},
},
{
Backup: &backup.Backup{
Version: 5,
},
},
{
Backup: &backup.Backup{
Version: 3,
},
},
},
assistBases: []BackupBase{
{
Backup: &backup.Backup{
Version: 4,
},
},
{
Backup: &backup.Backup{
Version: 2,
},
},
{
Backup: &backup.Backup{
Version: 6,
},
},
},
},
expectedBackupVersion: 1,
expectedAssistVersion: 2,
},
{
name: "Assist and Merge Bases, min assist",
bb: &backupBases{
mergeBases: []BackupBase{
{
Backup: &backup.Backup{
Version: 7,
},
},
{
Backup: &backup.Backup{
Version: 5,
},
},
{
Backup: &backup.Backup{
Version: 3,
},
},
},
assistBases: []BackupBase{
{
Backup: &backup.Backup{
Version: 4,
},
},
{
Backup: &backup.Backup{
Version: 2,
},
},
{
Backup: &backup.Backup{
Version: 6,
},
},
},
},
expectedBackupVersion: 3,
expectedAssistVersion: 2,
}, },
} }
for _, test := range table { for _, test := range table {
suite.Run(test.name, func() { suite.Run(test.name, func() {
assert.Equal(suite.T(), test.expectedVersion, test.bb.MinBackupVersion()) t := suite.T()
assert.Equal(t, test.expectedBackupVersion, test.bb.MinBackupVersion(), "backup")
assert.Equal(t, test.expectedAssistVersion, test.bb.MinAssistVersion(), "assist")
}) })
} }
} }

View File

@ -228,7 +228,7 @@ func (suite *SharePointIntegrationSuite) TestLinkSharesInheritanceRestoreAndBack
func (suite *SharePointIntegrationSuite) TestRestoreFolderNamedFolderRegression() { func (suite *SharePointIntegrationSuite) TestRestoreFolderNamedFolderRegression() {
// No reason why it couldn't work with previous versions, but this is when it got introduced. // No reason why it couldn't work with previous versions, but this is when it got introduced.
testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID) testRestoreFolderNamedFolderRegression(suite, version.Backup)
} }
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -292,7 +292,7 @@ func (suite *OneDriveIntegrationSuite) TestLinkSharesInheritanceRestoreAndBackup
func (suite *OneDriveIntegrationSuite) TestRestoreFolderNamedFolderRegression() { func (suite *OneDriveIntegrationSuite) TestRestoreFolderNamedFolderRegression() {
// No reason why it couldn't work with previous versions, but this is when it got introduced. // No reason why it couldn't work with previous versions, but this is when it got introduced.
testRestoreFolderNamedFolderRegression(suite, version.All8MigrateUserPNToID) testRestoreFolderNamedFolderRegression(suite, version.Backup)
} }
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------

View File

@ -24,7 +24,7 @@ func TestRestoreUnitSuite(t *testing.T) {
func (suite *RestoreUnitSuite) TestAugmentRestorePaths() { func (suite *RestoreUnitSuite) TestAugmentRestorePaths() {
// Adding a simple test here so that we can be sure that this // Adding a simple test here so that we can be sure that this
// function gets updated whenever we add a new version. // function gets updated whenever we add a new version.
require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version") require.LessOrEqual(suite.T(), version.Backup, version.Groups9Update, "unsupported backup version")
table := []struct { table := []struct {
name string name string
@ -216,7 +216,7 @@ func (suite *RestoreUnitSuite) TestAugmentRestorePaths() {
func (suite *RestoreUnitSuite) TestAugmentRestorePaths_DifferentRestorePath() { func (suite *RestoreUnitSuite) TestAugmentRestorePaths_DifferentRestorePath() {
// Adding a simple test here so that we can be sure that this // Adding a simple test here so that we can be sure that this
// function gets updated whenever we add a new version. // function gets updated whenever we add a new version.
require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version") require.LessOrEqual(suite.T(), version.Backup, version.Groups9Update, "unsupported backup version")
type pathPair struct { type pathPair struct {
storage string storage string

View File

@ -217,7 +217,8 @@ func (c *collection) withFile(name string, fileData []byte, meta MetaData) (*col
c.Aux = append(c.Aux, md) c.Aux = append(c.Aux, md)
// v6+ current metadata design // v6+ current metadata design
case version.OneDrive6NameInMeta, version.OneDrive7LocationRef, version.All8MigrateUserPNToID: case version.OneDrive6NameInMeta, version.OneDrive7LocationRef,
version.All8MigrateUserPNToID, version.Groups9Update:
item, err := FileWithData( item, err := FileWithData(
name+metadata.DataFileSuffix, name+metadata.DataFileSuffix,
name+metadata.DataFileSuffix, name+metadata.DataFileSuffix,
@ -251,7 +252,8 @@ func (c *collection) withFile(name string, fileData []byte, meta MetaData) (*col
func (c *collection) withFolder(name string, meta MetaData) (*collection, error) { func (c *collection) withFolder(name string, meta MetaData) (*collection, error) {
switch c.BackupVersion { switch c.BackupVersion {
case 0, version.OneDrive4DirIncludesPermissions, version.OneDrive5DirMetaNoName, case 0, version.OneDrive4DirIncludesPermissions, version.OneDrive5DirMetaNoName,
version.OneDrive6NameInMeta, version.OneDrive7LocationRef, version.All8MigrateUserPNToID: version.OneDrive6NameInMeta, version.OneDrive7LocationRef,
version.All8MigrateUserPNToID, version.Groups9Update:
return c, nil return c, nil
case version.OneDrive1DataAndMetaFiles, 2, version.OneDrive3IsMetaMarker: case version.OneDrive1DataAndMetaFiles, 2, version.OneDrive3IsMetaMarker:

View File

@ -371,6 +371,34 @@ func (op *BackupOperation) do(
return nil, clues.Wrap(err, "producing manifests and metadata") return nil, clues.Wrap(err, "producing manifests and metadata")
} }
// Force full backups if the base is an older corso version. Those backups
// don't have all the data we want to pull forward.
//
// TODO(ashmrtn): We can push this check further down the stack to either:
// * the metadata fetch code to disable individual bases (requires a
// function to completely remove a base from the set)
// * the base finder code to skip over older bases (breaks isolation a bit
// by requiring knowledge of good/bad backup versions for different
// services)
if op.Selectors.PathService() == path.GroupsService {
if mans.MinBackupVersion() != version.NoBackup &&
mans.MinBackupVersion() < version.Groups9Update {
logger.Ctx(ctx).Info("dropping merge bases due to groups version change")
mans.DisableMergeBases()
mans.DisableAssistBases()
canUseMetadata = false
mdColls = nil
}
if mans.MinAssistVersion() != version.NoBackup &&
mans.MinAssistVersion() < version.Groups9Update {
logger.Ctx(ctx).Info("disabling assist bases due to groups version change")
mans.DisableAssistBases()
}
}
ctx = clues.Add( ctx = clues.Add(
ctx, ctx,
"can_use_metadata", canUseMetadata, "can_use_metadata", canUseMetadata,

View File

@ -61,6 +61,40 @@ func (suite *RestorePathTransformerUnitSuite) TestGetPaths() {
expectErr assert.ErrorAssertionFunc expectErr assert.ErrorAssertionFunc
expected []expectPaths expected []expectPaths
}{ }{
{
name: "Groups List Errors v9",
// No version bump for the change so we always have to check for this.
backupVersion: version.Groups9Update,
input: []*details.Entry{
{
RepoRef: GroupsRootItemPath.RR.String(),
LocationRef: GroupsRootItemPath.Loc.String(),
ItemInfo: details.ItemInfo{
Groups: &details.GroupsInfo{
ItemType: details.SharePointList,
},
},
},
},
expectErr: assert.Error,
},
{
name: "Groups Page Errors v9",
// No version bump for the change so we always have to check for this.
backupVersion: version.Groups9Update,
input: []*details.Entry{
{
RepoRef: GroupsRootItemPath.RR.String(),
LocationRef: GroupsRootItemPath.Loc.String(),
ItemInfo: details.ItemInfo{
Groups: &details.GroupsInfo{
ItemType: details.SharePointPage,
},
},
},
},
expectErr: assert.Error,
},
{ {
name: "Groups List Errors", name: "Groups List Errors",
// No version bump for the change so we always have to check for this. // No version bump for the change so we always have to check for this.

View File

@ -231,6 +231,113 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
} }
} }
// TestBackup_Run_exchangeBasic_groups9VersionBump verifies that the Groups9
// version bump does NOT force a full backup for Exchange: the version check
// is scoped to the Groups service, so a second Exchange backup made at
// version.Groups9Update on top of an All8MigrateUserPNToID base should still
// run incrementally (fewer non-cached items written than the initial backup).
func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchangeBasic_groups9VersionBump() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
mb = evmock.NewBus()
sel = selectors.NewExchangeBackup([]string{suite.its.user.ID})
opts = control.DefaultOptions()
ws = deeTD.DriveIDFromRepoRef
)
sel.Include(
sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()),
// sel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()),
sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
// First backup is pinned to the older (pre-bump) version so the second
// backup below has an older-version base to build on.
bo, bod := prepNewTestBackupOp(
t,
ctx,
mb,
sel.Selector,
opts,
version.All8MigrateUserPNToID)
defer bod.close(t, ctx)
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&bo,
bod.sel,
bod.sel.ID(),
path.EmailCategory)
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bo.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.ExchangeService,
ws,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bo.Results.BackupID,
ws,
bod.kms,
bod.sss,
expectDeets,
false)
// Second backup at the bumped version; expected to remain incremental.
mb = evmock.NewBus()
notForcedFull := newTestBackupOp(
t,
ctx,
bod,
mb,
opts)
notForcedFull.BackupVersion = version.Groups9Update
runAndCheckBackup(t, ctx, &notForcedFull, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&notForcedFull,
bod.sel,
bod.sel.ID(),
path.EmailCategory)
_, expectDeets = deeTD.GetDeetsInBackup(
t,
ctx,
notForcedFull.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.ExchangeService,
ws,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
notForcedFull.Results.BackupID,
ws,
bod.kms,
bod.sss,
expectDeets,
false)
// The number of items backed up in the second backup should be less than the
// number of items in the original backup (i.e. it was incremental, not a
// forced full backup).
assert.Greater(
t,
bo.Results.Counts[string(count.PersistedNonCachedFiles)],
notForcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
"items written")
}
func (suite *ExchangeBackupIntgSuite) TestBackup_Run_incrementalExchange() { func (suite *ExchangeBackupIntgSuite) TestBackup_Run_incrementalExchange() {
testExchangeContinuousBackups(suite, control.Toggles{}) testExchangeContinuousBackups(suite, control.Toggles{})
} }

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"testing" "testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
evmock "github.com/alcionai/corso/src/internal/events/mock" evmock "github.com/alcionai/corso/src/internal/events/mock"
@ -13,6 +14,7 @@ import (
"github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/internal/version"
deeTD "github.com/alcionai/corso/src/pkg/backup/details/testdata" deeTD "github.com/alcionai/corso/src/pkg/backup/details/testdata"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors"
selTD "github.com/alcionai/corso/src/pkg/selectors/testdata" selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
@ -78,6 +80,121 @@ func (suite *GroupsBackupIntgSuite) TestBackup_Run_incrementalGroups() {
true) true)
} }
// TestBackup_Run_groupsBasic_groups9VersionBump verifies that a Groups backup
// made at version.Groups9Update on top of a base from an older corso version
// (All8MigrateUserPNToID) is forced to be a full backup: the second backup
// should persist roughly the same number of non-cached items as the first.
func (suite *GroupsBackupIntgSuite) TestBackup_Run_groupsBasic_groups9VersionBump() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
mb = evmock.NewBus()
sel = selectors.NewGroupsBackup([]string{suite.its.group.ID})
opts = control.DefaultOptions()
whatSet = deeTD.CategoryFromRepoRef
)
sel.Include(
selTD.GroupsBackupLibraryFolderScope(sel),
selTD.GroupsBackupChannelScope(sel))
// First backup is pinned to the older (pre-bump) version so the second
// backup below has an older-version base that should trigger the force.
bo, bod := prepNewTestBackupOp(
t,
ctx,
mb,
sel.Selector,
opts,
version.All8MigrateUserPNToID)
defer bod.close(t, ctx)
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&bo,
bod.sel,
bod.sel.ID(),
path.ChannelMessagesCategory)
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bo.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.GroupsService,
whatSet,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bo.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
expectDeets,
false)
// Second backup at the bumped version; the older base should force a full.
mb = evmock.NewBus()
forcedFull := newTestBackupOp(
t,
ctx,
bod,
mb,
opts)
forcedFull.BackupVersion = version.Groups9Update
runAndCheckBackup(t, ctx, &forcedFull, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&forcedFull,
bod.sel,
bod.sel.ID(),
path.ChannelMessagesCategory)
_, expectDeets = deeTD.GetDeetsInBackup(
t,
ctx,
forcedFull.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.GroupsService,
whatSet,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
forcedFull.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
expectDeets,
false)
// The number of items backed up in the forced full backup should be roughly
// the same as the number of items in the original backup.
// NOTE(review): the comment says "roughly" but the assertion requires exact
// equality — flaky if item counts drift between runs; confirm intent.
assert.Equal(
t,
bo.Results.Counts[string(count.PersistedNonCachedFiles)],
forcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
"items written")
}
// TestBackup_Run_groupsVersion9AssistBases checks assist-base handling across
// the Groups9 version bump for the Groups service. expectCached is false:
// Groups forces full backups across the bump, so no kopia-cached items are
// expected in the follow-up backup.
func (suite *GroupsBackupIntgSuite) TestBackup_Run_groupsVersion9AssistBases() {
sel := selectors.NewGroupsBackup([]string{suite.its.group.ID})
sel.Include(
selTD.GroupsBackupLibraryFolderScope(sel),
selTD.GroupsBackupChannelScope(sel))
runDriveAssistBaseGroupsUpdate(suite, sel.Selector, false)
}
func (suite *GroupsBackupIntgSuite) TestBackup_Run_groupsBasic() { func (suite *GroupsBackupIntgSuite) TestBackup_Run_groupsBasic() {
t := suite.T() t := suite.T()

View File

@ -169,6 +169,7 @@ func prepNewTestBackupOp(
bod, bod,
bus, bus,
opts) opts)
bo.BackupVersion = backupVersion
bod.sss = streamstore.NewStreamer( bod.sss = streamstore.NewStreamer(
bod.kw, bod.kw,

View File

@ -3,6 +3,8 @@ package test_test
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"sync/atomic"
"testing" "testing"
"github.com/alcionai/clues" "github.com/alcionai/clues"
@ -36,6 +38,7 @@ import (
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata" ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/count" "github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/extensions"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors"
@ -107,6 +110,118 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
false) false)
} }
// TestBackup_Run_oneDriveBasic_groups9VersionBump verifies that the Groups9
// version bump does NOT force a full backup for OneDrive: the version check
// only applies to the Groups service, so a second OneDrive backup made at
// version.Groups9Update on top of an All8MigrateUserPNToID base should still
// run incrementally (fewer non-cached items written than the initial backup).
func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveBasic_groups9VersionBump() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
mb = evmock.NewBus()
userID = tconfig.SecondaryM365UserID(t)
osel = selectors.NewOneDriveBackup([]string{userID})
ws = deeTD.DriveIDFromRepoRef
opts = control.DefaultOptions()
)
osel.Include(selTD.OneDriveBackupFolderScope(osel))
// First backup is pinned to the older (pre-bump) version so the second
// backup below has an older-version base to build on.
bo, bod := prepNewTestBackupOp(
t,
ctx,
mb,
osel.Selector,
opts,
version.All8MigrateUserPNToID)
defer bod.close(t, ctx)
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&bo,
bod.sel,
bod.sel.ID(),
path.FilesCategory)
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bo.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.OneDriveService,
ws,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bo.Results.BackupID,
ws,
bod.kms,
bod.sss,
expectDeets,
false)
// Second backup at the bumped version; expected to remain incremental.
mb = evmock.NewBus()
notForcedFull := newTestBackupOp(
t,
ctx,
bod,
mb,
opts)
notForcedFull.BackupVersion = version.Groups9Update
runAndCheckBackup(t, ctx, &notForcedFull, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&notForcedFull,
bod.sel,
bod.sel.ID(),
path.FilesCategory)
_, expectDeets = deeTD.GetDeetsInBackup(
t,
ctx,
notForcedFull.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.OneDriveService,
ws,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
notForcedFull.Results.BackupID,
ws,
bod.kms,
bod.sss,
expectDeets,
false)
// The number of items backed up in the second backup should be less than the
// number of items in the original backup (i.e. it was incremental, not a
// forced full backup).
assert.Greater(
t,
bo.Results.Counts[string(count.PersistedNonCachedFiles)],
notForcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
"items written")
}
//func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveVersion9AssistBases() {
// sel := selectors.NewOneDriveBackup([]string{tconfig.SecondaryM365UserID(suite.T())})
// sel.Include(selTD.OneDriveBackupFolderScope(sel))
//
// runDriveAssistBaseGroupsUpdate(suite, sel.Selector, true)
//}
func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() { func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
sel := selectors.NewOneDriveRestore([]string{suite.its.user.ID}) sel := selectors.NewOneDriveRestore([]string{suite.its.user.ID})
@ -806,6 +921,179 @@ func runDriveIncrementalTest(
} }
} }
// Compile-time checks that the test helpers satisfy the interfaces they are
// used through.
var (
_ io.ReadCloser = &failFirstRead{}
_ extensions.CreateItemExtensioner = &createFailFirstRead{}
)
// failFirstRead fails the first read on a file being uploaded during a
// snapshot. Only one file is failed during the snapshot even if the snapshot
// contains multiple files.
type failFirstRead struct {
// firstFile is shared (via the owning createFailFirstRead) across all
// readers produced during a snapshot so that exactly one read fails.
firstFile *atomic.Bool
io.ReadCloser
}
// Read returns an injected error for the very first read across all readers
// sharing firstFile; all subsequent reads delegate to the wrapped ReadCloser.
func (e *failFirstRead) Read(p []byte) (int, error) {
if e.firstFile.CompareAndSwap(true, false) {
// This is the first file being read, return an error for it.
return 0, clues.New("injected error for testing")
}
return e.ReadCloser.Read(p)
}
// newCreateSingleFileFailExtension returns an item-extension factory whose
// wrapped readers fail exactly one read (the first one) per snapshot. Used to
// produce an assist base by making a backup partially fail.
func newCreateSingleFileFailExtension() *createFailFirstRead {
// Start true so the first CompareAndSwap(true, false) wins and injects the
// error.
firstItem := &atomic.Bool{}
firstItem.Store(true)
return &createFailFirstRead{
firstItem: firstItem,
}
}
// createFailFirstRead is a CreateItemExtensioner that wraps every item's
// reader in a failFirstRead sharing a single firstItem flag.
type createFailFirstRead struct {
// firstItem tracks whether the single injected failure has happened yet.
firstItem *atomic.Bool
}
// CreateItemExtension wraps r so that the first read performed by any reader
// created from this extension returns an injected error. It never returns an
// error itself.
func (ce *createFailFirstRead) CreateItemExtension(
_ context.Context,
r io.ReadCloser,
_ details.ItemInfo,
_ *details.ExtensionData,
) (io.ReadCloser, error) {
return &failFirstRead{
firstFile: ce.firstItem,
ReadCloser: r,
}, nil
}
// runDriveAssistBaseGroupsUpdate is a shared driver for the Groups9 bump
// assist-base tests. It first creates a backup at the older
// All8MigrateUserPNToID version with an injected single-file failure (making
// an assist base), then runs a second backup at version.Groups9Update and
// checks whether its items were served from the kopia cache.
//
// expectCached selects the final assertion: true means cached items are
// expected (OneDrive/SharePoint, which don't force full backups across the
// bump); false means none are expected (Groups, which does force a full).
func runDriveAssistBaseGroupsUpdate(
suite tester.Suite,
sel selectors.Selector,
expectCached bool,
) {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
whatSet = deeTD.CategoryFromRepoRef
mb = evmock.NewBus()
opts = control.DefaultOptions()
)
// Inject a one-shot read failure so the first backup completes only
// partially and becomes an assist base.
opts.ItemExtensionFactory = []extensions.CreateItemExtensioner{
newCreateSingleFileFailExtension(),
}
// Creating out here so bod lasts for the full test and isn't closed until
// the test is completely done.
bo, bod := prepNewTestBackupOp(
t,
ctx,
mb,
sel,
opts,
version.All8MigrateUserPNToID)
defer bod.close(t, ctx)
suite.Run("makeAssistBackup", func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
// Need to run manually because runAndCheckBackup assumes success for the
// most part, and this backup is expected to partially fail.
err := bo.Run(ctx)
assert.Error(t, err, clues.ToCore(err))
assert.NotEmpty(t, bo.Results, "backup had non-zero results")
assert.NotEmpty(t, bo.Results.BackupID, "backup generated an ID")
assert.NotZero(t, bo.Results.ItemsWritten)
// TODO(ashmrtn): Check that the base is marked as an assist base.
t.Logf("base error: %v\n", err)
})
// Don't run the below if we've already failed since it won't make sense
// anymore.
if suite.T().Failed() {
return
}
suite.Run("makeIncrementalBackup", func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
mb = evmock.NewBus()
opts = control.DefaultOptions()
)
forcedFull := newTestBackupOp(
t,
ctx,
bod,
mb,
opts)
forcedFull.BackupVersion = version.Groups9Update
runAndCheckBackup(t, ctx, &forcedFull, mb, false)
reasons, err := bod.sel.Reasons(bod.acct.ID(), false)
require.NoError(t, err, clues.ToCore(err))
// Every (service, category) reason the selector covers should appear in
// the new backup's manifests.
for _, reason := range reasons {
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&forcedFull,
bod.sel,
bod.sel.ID(),
reason.Category())
}
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
forcedFull.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
bod.sel.PathService(),
whatSet,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
forcedFull.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
expectDeets,
false)
// For groups the forced full backup shouldn't have any cached items. For
// OneDrive and SharePoint it should since they shouldn't be forcing full
// backups.
cachedCheck := assert.NotZero
if !expectCached {
cachedCheck = assert.Zero
}
cachedCheck(
t,
forcedFull.Results.Counts[string(count.PersistedCachedFiles)],
"kopia cached items")
})
}
func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() { func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
t := suite.T() t := suite.T()

View File

@ -46,6 +46,117 @@ func (suite *SharePointBackupIntgSuite) SetupSuite() {
suite.its = newIntegrationTesterSetup(suite.T()) suite.its = newIntegrationTesterSetup(suite.T())
} }
// TestBackup_Run_sharePointBasic_groups9VersionBump verifies that the Groups9
// version bump does NOT force a full backup for SharePoint: the version check
// only applies to the Groups service, so a second SharePoint backup made at
// version.Groups9Update on top of an All8MigrateUserPNToID base should still
// run incrementally (fewer non-cached items written than the initial backup).
func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointBasic_groups9VersionBump() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
mb = evmock.NewBus()
sel = selectors.NewSharePointBackup([]string{suite.its.site.ID})
opts = control.DefaultOptions()
ws = deeTD.DriveIDFromRepoRef
)
sel.Include(selTD.SharePointBackupFolderScope(sel))
// First backup is pinned to the older (pre-bump) version so the second
// backup below has an older-version base to build on.
bo, bod := prepNewTestBackupOp(
t,
ctx,
mb,
sel.Selector,
opts,
version.All8MigrateUserPNToID)
defer bod.close(t, ctx)
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&bo,
bod.sel,
bod.sel.ID(),
path.LibrariesCategory)
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bo.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.SharePointService,
ws,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bo.Results.BackupID,
ws,
bod.kms,
bod.sss,
expectDeets,
false)
// Second backup at the bumped version; expected to remain incremental.
mb = evmock.NewBus()
notForcedFull := newTestBackupOp(
t,
ctx,
bod,
mb,
opts)
notForcedFull.BackupVersion = version.Groups9Update
runAndCheckBackup(t, ctx, &notForcedFull, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&notForcedFull,
bod.sel,
bod.sel.ID(),
path.LibrariesCategory)
_, expectDeets = deeTD.GetDeetsInBackup(
t,
ctx,
notForcedFull.Results.BackupID,
bod.acct.ID(),
bod.sel.ID(),
path.SharePointService,
ws,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
notForcedFull.Results.BackupID,
ws,
bod.kms,
bod.sss,
expectDeets,
false)
// The number of items backed up in the second backup should be less than the
// number of items in the original backup (i.e. it was incremental, not a
// forced full backup).
assert.Greater(
t,
bo.Results.Counts[string(count.PersistedNonCachedFiles)],
notForcedFull.Results.Counts[string(count.PersistedNonCachedFiles)],
"items written")
}
// TestBackup_Run_sharePointVersion9AssistBases checks assist-base handling
// across the Groups9 version bump for SharePoint. expectCached is true:
// SharePoint doesn't force full backups across the bump, so cached items are
// expected in the follow-up backup.
func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointVersion9AssistBases() {
sel := selectors.NewSharePointBackup([]string{suite.its.site.ID})
sel.Include(selTD.SharePointBackupFolderScope(sel))
runDriveAssistBaseGroupsUpdate(suite, sel.Selector, true)
}
func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() { func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
sel := selectors.NewSharePointRestore([]string{suite.its.site.ID}) sel := selectors.NewSharePointRestore([]string{suite.its.site.ID})

View File

@ -1,6 +1,6 @@
package version package version
const Backup = 8 const Backup = Groups9Update
// Various labels to refer to important version changes. // Various labels to refer to important version changes.
// Labels don't need 1:1 service:version representation. Add a new // Labels don't need 1:1 service:version representation. Add a new
@ -46,6 +46,10 @@ const (
// All8MigrateUserPNToID marks when we migrated repo refs from the user's // All8MigrateUserPNToID marks when we migrated repo refs from the user's
// PrincipalName to their ID for stability. // PrincipalName to their ID for stability.
All8MigrateUserPNToID = 8 All8MigrateUserPNToID = 8
// Groups9Update marks when we updated the details that groups and teams use.
// Older backups don't contain all the info we want in details.
Groups9Update = 9
) )
// IsNoBackup returns true if the version implies that no prior backup exists. // IsNoBackup returns true if the version implies that no prior backup exists.