Compare commits

...

3 Commits

Author SHA1 Message Date
ashmrtn
d4bb22f498 Alternative way to handle 2-level calendars hierarchy (#2397)
All calendars except the default are nested under an "Other Calendars" folder. Having a non-default calendar named the same as the default calendar does not cause problems when fetching the default calendar by name. Only the default calendar will be returned in that situation.

This fixes the bug where we had multiple collections for the same path but representing different folders.

Also updates the restore execution path to handle the new nested folder structure.

Backup, incremental backup, and restore flows tested manually

- [x]  Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [ ]  No

- [ ] 🌻 Feature
- [x] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Test
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

* #2388

- [x] 💪 Manual
- [ ]  Unit test
- [ ] 💚 E2E
2023-02-03 15:39:25 -08:00
Keepers
6a1dbd388b handle error from bu.backupCollections (#2386)
## Does this PR need a docs update or release note?

- [x]  No 

## Type of change

- [x] 🐛 Bugfix

## Test Plan

- [x] 💚 E2E
2023-02-03 11:49:57 +05:30
Keepers
f3721e9e5d extra panic protection in operations (#2383)
## Does this PR need a docs update or release note?

- [x]  No 

## Type of change

- [x] 🧹 Tech Debt/Cleanup

## Test Plan

- [x] 💚 E2E
2023-02-02 21:15:21 -05:00
10 changed files with 118 additions and 38 deletions

View File

@ -12,6 +12,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Document Corso's fault-tolerance and restartability features
- Add retries on timeouts and status code 500 for Exchange
- Increase page size preference for delta requests for Exchange to reduce number of roundtrips
- OneDrive file/folder permissions can now be backed up and restored
- Add `--restore-permissions` flag to toggle restoration of OneDrive permissions
- Add versions to backups so that we can understand/handle older backup formats
### Fixed
- Backing up a calendar that has the same name as the default calendar
### Known Issues
- When the same user has permissions to a file and the containing
folder, we only restore folder level permissions for the user and no
separate file only permission is restored.
- Link shares are not restored
## [v0.2.0] (alpha) - 2023-1-29

View File

@ -501,10 +501,11 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() {
directoryCaches = make(map[path.CategoryType]graph.ContainerResolver)
folderName = tester.DefaultTestRestoreDestination().ContainerName
tests = []struct {
name string
pathFunc1 func(t *testing.T) path.Path
pathFunc2 func(t *testing.T) path.Path
category path.CategoryType
name string
pathFunc1 func(t *testing.T) path.Path
pathFunc2 func(t *testing.T) path.Path
category path.CategoryType
folderPrefix string
}{
{
name: "Mail Cache Test",
@ -587,6 +588,7 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() {
require.NoError(t, err)
return aPath
},
folderPrefix: calendarOthersFolder,
},
}
)
@ -617,8 +619,9 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() {
_, err = resolver.IDToPath(ctx, secondID)
require.NoError(t, err)
_, ok := resolver.PathInCache(folderName)
require.True(t, ok)
p := stdpath.Join(test.folderPrefix, folderName)
_, ok := resolver.PathInCache(p)
require.True(t, ok, "looking for path in cache: %s", p)
})
}
}

View File

@ -537,9 +537,9 @@ func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression(
},
{
name: "Birthday Calendar",
expected: "Birthdays",
expected: calendarOthersFolder + "/Birthdays",
scope: selectors.NewExchangeBackup(users).EventCalendars(
[]string{"Birthdays"},
[]string{calendarOthersFolder + "/Birthdays"},
selectors.PrefixMatch(),
)[0],
},

View File

@ -64,7 +64,15 @@ func (ecc *eventCalendarCache) Populate(
return errors.Wrap(err, "initializing")
}
err := ecc.enumer.EnumerateContainers(ctx, ecc.userID, "", ecc.addFolder)
err := ecc.enumer.EnumerateContainers(
ctx,
ecc.userID,
"",
func(cf graph.CacheFolder) error {
cf.SetPath(path.Builder{}.Append(calendarOthersFolder, *cf.GetDisplayName()))
return ecc.addFolder(cf)
},
)
if err != nil {
return errors.Wrap(err, "enumerating containers")
}
@ -83,7 +91,7 @@ func (ecc *eventCalendarCache) AddToCache(ctx context.Context, f graph.Container
return errors.Wrap(err, "validating container")
}
temp := graph.NewCacheFolder(f, path.Builder{}.Append(*f.GetDisplayName()))
temp := graph.NewCacheFolder(f, path.Builder{}.Append(calendarOthersFolder, *f.GetDisplayName()))
if err := ecc.addFolder(temp); err != nil {
return errors.Wrap(err, "adding container")

View File

@ -38,4 +38,5 @@ const (
rootFolderAlias = "msgfolderroot"
DefaultContactFolder = "Contacts"
DefaultCalendar = "Calendar"
calendarOthersFolder = "Other Calendars"
)

View File

@ -637,7 +637,11 @@ func establishEventsRestoreLocation(
user string,
isNewCache bool,
) (string, error) {
cached, ok := ecc.PathInCache(folders[0])
// Need to prefix with the "Other Calendars" folder so lookup happens properly.
cached, ok := ecc.PathInCache(path.Builder{}.Append(
calendarOthersFolder,
folders[0],
).String())
if ok {
return cached, nil
}

View File

@ -2,6 +2,7 @@ package operations
import (
"context"
"runtime/debug"
"time"
"github.com/alcionai/clues"
@ -106,7 +107,13 @@ type detailsWriter interface {
// ---------------------------------------------------------------------------
// Run begins a synchronous backup operation.
func (op *BackupOperation) Run(ctx context.Context) error {
func (op *BackupOperation) Run(ctx context.Context) (err error) {
defer func() {
if r := recover(); r != nil {
err = clues.Wrap(r.(error), "panic recovery").WithClues(ctx).With("stacktrace", debug.Stack())
}
}()
ctx, end := D.Span(ctx, "operations:backup:run")
defer func() {
end()
@ -160,6 +167,12 @@ func (op *BackupOperation) do(ctx context.Context) (err error) {
// persist operation results to the model store on exit
defer func() {
// panic recovery here prevents additional errors in op.persistResults()
if r := recover(); r != nil {
err = clues.Wrap(r.(error), "panic recovery").WithClues(ctx).With("stacktrace", debug.Stack())
return
}
err = op.persistResults(startTime, &opStats)
if err != nil {
op.Errors.Fail(errors.Wrap(err, "persisting backup results"))
@ -189,6 +202,8 @@ func (op *BackupOperation) do(ctx context.Context) (err error) {
op.Errors.Fail(errors.Wrap(err, "collecting manifest heuristics"))
opStats.readErr = op.Errors.Err()
logger.Ctx(ctx).With("err", err).Errorw("producing manifests and metadata", clues.InErr(err).Slice()...)
return opStats.readErr
}
@ -197,6 +212,8 @@ func (op *BackupOperation) do(ctx context.Context) (err error) {
op.Errors.Fail(errors.Wrap(err, "connecting to m365"))
opStats.readErr = op.Errors.Err()
logger.Ctx(ctx).With("err", err).Errorw("connectng to m365", clues.InErr(err).Slice()...)
return opStats.readErr
}
@ -205,6 +222,8 @@ func (op *BackupOperation) do(ctx context.Context) (err error) {
op.Errors.Fail(errors.Wrap(err, "retrieving data to backup"))
opStats.readErr = op.Errors.Err()
logger.Ctx(ctx).With("err", err).Errorw("producing backup data collections", clues.InErr(err).Slice()...)
return opStats.readErr
}
@ -223,6 +242,8 @@ func (op *BackupOperation) do(ctx context.Context) (err error) {
op.Errors.Fail(errors.Wrap(err, "backing up service data"))
opStats.writeErr = op.Errors.Err()
logger.Ctx(ctx).With("err", err).Errorw("persisting collection backups", clues.InErr(err).Slice()...)
return opStats.writeErr
}
@ -237,6 +258,8 @@ func (op *BackupOperation) do(ctx context.Context) (err error) {
op.Errors.Fail(errors.Wrap(err, "merging backup details"))
opStats.writeErr = op.Errors.Err()
logger.Ctx(ctx).With("err", err).Errorw("merging details", clues.InErr(err).Slice()...)
return opStats.writeErr
}
@ -428,24 +451,22 @@ func consumeBackupDataCollections(
cs,
nil,
tags,
isIncremental,
)
isIncremental)
if err != nil {
if kopiaStats == nil {
return nil, nil, nil, err
}
return nil, nil, nil, errors.Wrapf(
err,
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount, kopiaStats.IgnoredErrorCount)
}
if kopiaStats.ErrorCount > 0 || kopiaStats.IgnoredErrorCount > 0 {
if err != nil {
err = errors.Wrapf(
err,
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount,
kopiaStats.IgnoredErrorCount,
)
} else {
err = errors.Errorf(
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount,
kopiaStats.IgnoredErrorCount,
)
}
err = errors.Errorf(
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount, kopiaStats.IgnoredErrorCount)
}
return kopiaStats, deets, itemsSourcedFromBase, err
@ -589,15 +610,21 @@ func (op *BackupOperation) persistResults(
opStats.writeErr)
}
op.Results.BytesRead = opStats.k.TotalHashedBytes
op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
op.Results.ItemsWritten = opStats.k.TotalFileCount
op.Results.ResourceOwners = opStats.resourceCount
if opStats.gc == nil {
op.Status = Failed
return errors.New("data population never completed")
}
if opStats.readErr == nil && opStats.writeErr == nil && opStats.gc.Successful == 0 {
op.Status = NoData
}
op.Results.BytesRead = opStats.k.TotalHashedBytes
op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
op.Results.ItemsRead = opStats.gc.Successful
op.Results.ItemsWritten = opStats.k.TotalFileCount
op.Results.ResourceOwners = opStats.resourceCount
return nil
}

View File

@ -3,6 +3,7 @@ package operations
import (
"context"
"fmt"
"runtime/debug"
"time"
"github.com/alcionai/clues"
@ -106,6 +107,12 @@ type restorer interface {
// Run begins a synchronous restore operation.
func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) {
defer func() {
if r := recover(); r != nil {
err = clues.Wrap(r.(error), "panic recovery").WithClues(ctx).With("stacktrace", debug.Stack())
}
}()
ctx, end := D.Span(ctx, "operations:restore:run")
defer func() {
end()
@ -143,6 +150,12 @@ func (op *RestoreOperation) do(ctx context.Context) (restoreDetails *details.Det
)
defer func() {
// panic recovery here prevents additional errors in op.persistResults()
if r := recover(); r != nil {
err = clues.Wrap(r.(error), "panic recovery").WithClues(ctx).With("stacktrace", debug.Stack())
return
}
err = op.persistResults(ctx, startTime, &opStats)
if err != nil {
return
@ -250,14 +263,20 @@ func (op *RestoreOperation) persistResults(
opStats.writeErr)
}
op.Results.BytesRead = opStats.bytesRead.NumBytes
op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count
op.Results.ResourceOwners = opStats.resourceCount
if opStats.gc == nil {
op.Status = Failed
return errors.New("data restoration never completed")
}
if opStats.readErr == nil && opStats.writeErr == nil && opStats.gc.Successful == 0 {
op.Status = NoData
}
op.Results.BytesRead = opStats.bytesRead.NumBytes
op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count
op.Results.ItemsWritten = opStats.gc.Successful
op.Results.ResourceOwners = opStats.resourceCount
dur := op.Results.CompletedAt.Sub(op.Results.StartedAt)

View File

@ -87,11 +87,12 @@ func (e *Errors) Fail(err error) *Errors {
// setErr handles setting errors.err. Sync locking gets
// handled upstream of this call.
func (e *Errors) setErr(err error) *Errors {
if e.err != nil {
return e.addErr(err)
if e.err == nil {
e.err = err
return e
}
e.err = err
e.errs = append(e.errs, err)
return e
}

View File

@ -73,6 +73,8 @@ func (suite *FaultErrorsUnitSuite) TestErr() {
suite.T().Run(test.name, func(t *testing.T) {
n := fault.New(test.failFast)
require.NotNil(t, n)
require.NoError(t, n.Err())
require.Empty(t, n.Errs())
e := n.Fail(test.fail)
require.NotNil(t, e)
@ -90,6 +92,8 @@ func (suite *FaultErrorsUnitSuite) TestFail() {
n := fault.New(false)
require.NotNil(t, n)
require.NoError(t, n.Err())
require.Empty(t, n.Errs())
n.Fail(assert.AnError)
assert.Error(t, n.Err())