auto-log recoverable errors with stack (#3598)

Automatically log whenever a recoverable error or a skipped item is added to fault. The log entry will include a stack trace captured at the location where the recoverable error was logged. Clues does not yet have a method for pulling a stack trace out of an error; that can be added at a future date.

---

#### Does this PR need a docs update or release note?

- [x]  No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Test Plan

- [x]  Unit test
- [x] 💚 E2E
This commit is contained in:
Keepers 2023-06-13 16:18:18 -06:00 committed by GitHub
parent e4ec00a5d2
commit ce72acbcc1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 271 additions and 159 deletions

View File

@ -42,7 +42,7 @@ func (kdc *kopiaDataCollection) Items(
for _, item := range kdc.items { for _, item := range kdc.items {
s, err := kdc.FetchItemByName(ctx, item) s, err := kdc.FetchItemByName(ctx, item)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "fetching item"). el.AddRecoverable(ctx, clues.Wrap(err, "fetching item").
WithClues(ctx). WithClues(ctx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))

View File

@ -133,6 +133,12 @@ type itemDetails struct {
} }
type corsoProgress struct { type corsoProgress struct {
// this is an unwanted hack. We can't extend the kopia interface
// funcs to pass through a context. This is the second best way to
// get an at least partially formed context into funcs that need it
// for logging and other purposes.
ctx context.Context
snapshotfs.UploadProgress snapshotfs.UploadProgress
pending map[string]*itemDetails pending map[string]*itemDetails
deets *details.Builder deets *details.Builder
@ -183,11 +189,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
// never had to materialize their details in-memory. // never had to materialize their details in-memory.
if d.info == nil { if d.info == nil {
if d.prevPath == nil { if d.prevPath == nil {
cp.errs.AddRecoverable(clues.New("item sourced from previous backup with no previous path"). cp.errs.AddRecoverable(cp.ctx, clues.New("item sourced from previous backup with no previous path").
With( With(
"service", d.repoPath.Service().String(), "service", d.repoPath.Service().String(),
"category", d.repoPath.Category().String(), "category", d.repoPath.Category().String()).
).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
return return
@ -198,11 +203,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
err := cp.toMerge.addRepoRef(d.prevPath.ToBuilder(), d.repoPath, d.locationPath) err := cp.toMerge.addRepoRef(d.prevPath.ToBuilder(), d.repoPath, d.locationPath)
if err != nil { if err != nil {
cp.errs.AddRecoverable(clues.Wrap(err, "adding item to merge list"). cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "adding item to merge list").
With( With(
"service", d.repoPath.Service().String(), "service", d.repoPath.Service().String(),
"category", d.repoPath.Category().String(), "category", d.repoPath.Category().String()).
).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
} }
@ -215,11 +219,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
!d.cached, !d.cached,
*d.info) *d.info)
if err != nil { if err != nil {
cp.errs.AddRecoverable(clues.New("adding item to details"). cp.errs.AddRecoverable(cp.ctx, clues.New("adding item to details").
With( With(
"service", d.repoPath.Service().String(), "service", d.repoPath.Service().String(),
"category", d.repoPath.Category().String(), "category", d.repoPath.Category().String()).
).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
return return
@ -278,7 +281,7 @@ func (cp *corsoProgress) Error(relpath string, err error, isIgnored bool) {
defer cp.UploadProgress.Error(relpath, err, isIgnored) defer cp.UploadProgress.Error(relpath, err, isIgnored)
cp.errs.AddRecoverable(clues.Wrap(err, "kopia reported error"). cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "kopia reported error").
With("is_ignored", isIgnored, "relative_path", relpath). With("is_ignored", isIgnored, "relative_path", relpath).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
} }
@ -350,7 +353,7 @@ func collectionEntries(
itemPath, err := streamedEnts.FullPath().AppendItem(e.UUID()) itemPath, err := streamedEnts.FullPath().AppendItem(e.UUID())
if err != nil { if err != nil {
err = clues.Wrap(err, "getting full item path") err = clues.Wrap(err, "getting full item path")
progress.errs.AddRecoverable(err) progress.errs.AddRecoverable(ctx, err)
logger.CtxErr(ctx, err).Error("getting full item path") logger.CtxErr(ctx, err).Error("getting full item path")

View File

@ -472,8 +472,12 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
bd := &details.Builder{} bd := &details.Builder{}
cp := corsoProgress{ cp := corsoProgress{
ctx: ctx,
UploadProgress: &snapshotfs.NullUploadProgress{}, UploadProgress: &snapshotfs.NullUploadProgress{},
deets: bd, deets: bd,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
@ -526,6 +530,10 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() { func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
bd := &details.Builder{} bd := &details.Builder{}
cachedItems := map[string]testInfo{ cachedItems := map[string]testInfo{
suite.targetFileName: { suite.targetFileName: {
@ -535,6 +543,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
}, },
} }
cp := corsoProgress{ cp := corsoProgress{
ctx: ctx,
UploadProgress: &snapshotfs.NullUploadProgress{}, UploadProgress: &snapshotfs.NullUploadProgress{},
deets: bd, deets: bd,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
@ -565,6 +574,9 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
prevPath := makePath( prevPath := makePath(
suite.T(), suite.T(),
[]string{testTenant, service, testUser, category, testInboxDir, testFileName2}, []string{testTenant, service, testUser, category, testInboxDir, testFileName2},
@ -582,6 +594,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
// Setup stuff. // Setup stuff.
db := &details.Builder{} db := &details.Builder{}
cp := corsoProgress{ cp := corsoProgress{
ctx: ctx,
UploadProgress: &snapshotfs.NullUploadProgress{}, UploadProgress: &snapshotfs.NullUploadProgress{},
deets: db, deets: db,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
@ -617,8 +630,12 @@ func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
bd := &details.Builder{} bd := &details.Builder{}
cp := corsoProgress{ cp := corsoProgress{
ctx: ctx,
UploadProgress: &snapshotfs.NullUploadProgress{}, UploadProgress: &snapshotfs.NullUploadProgress{},
deets: bd, deets: bd,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
@ -682,6 +699,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
} }
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -801,6 +819,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
defer flush() defer flush()
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -908,6 +927,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
defer flush() defer flush()
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
} }
@ -1004,6 +1024,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
defer flush() defer flush()
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -1298,6 +1319,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
defer flush() defer flush()
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -2221,6 +2243,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
defer flush() defer flush()
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -2375,6 +2398,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
) )
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -2477,6 +2501,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase()
) )
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -2733,6 +2758,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
) )
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),
@ -2901,6 +2927,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
) )
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
errs: fault.New(true), errs: fault.New(true),

View File

@ -160,6 +160,7 @@ func (w Wrapper) ConsumeBackupCollections(
} }
progress := &corsoProgress{ progress := &corsoProgress{
ctx: ctx,
pending: map[string]*itemDetails{}, pending: map[string]*itemDetails{},
deets: &details.Builder{}, deets: &details.Builder{},
toMerge: newMergeDetails(), toMerge: newMergeDetails(),
@ -415,7 +416,7 @@ func loadDirsAndItems(
dir, err := getDir(ictx, dirItems.dir, snapshotRoot) dir, err := getDir(ictx, dirItems.dir, snapshotRoot)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "loading storage directory"). el.AddRecoverable(ctx, clues.Wrap(err, "loading storage directory").
WithClues(ictx). WithClues(ictx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
@ -431,7 +432,7 @@ func loadDirsAndItems(
} }
if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil { if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {
el.AddRecoverable(clues.Wrap(err, "adding collection to merge collection"). el.AddRecoverable(ctx, clues.Wrap(err, "adding collection to merge collection").
WithClues(ctx). WithClues(ctx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
@ -493,7 +494,7 @@ func (w Wrapper) ProduceRestoreCollections(
parentStoragePath, err := itemPaths.StoragePath.Dir() parentStoragePath, err := itemPaths.StoragePath.Dir()
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "getting storage directory path"). el.AddRecoverable(ictx, clues.Wrap(err, "getting storage directory path").
WithClues(ictx). WithClues(ictx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))

View File

@ -224,7 +224,7 @@ func ProduceBackupCollections(
su, su,
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -404,7 +404,7 @@ func populateCollections(
!ctrlOpts.ToggleFeatures.DisableDelta) !ctrlOpts.ToggleFeatures.DisableDelta)
if err != nil { if err != nil {
if !graph.IsErrDeletedInFlight(err) { if !graph.IsErrDeletedInFlight(err) {
el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -467,7 +467,7 @@ func populateCollections(
) )
if collections[id] != nil { if collections[id] != nil {
el.AddRecoverable(clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx)) el.AddRecoverable(ctx, clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx))
continue continue
} }

View File

@ -332,7 +332,7 @@ func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Str
ic := make(chan data.Stream) ic := make(chan data.Stream)
defer close(ic) defer close(ic)
errs.AddRecoverable(assert.AnError) errs.AddRecoverable(ctx, assert.AnError)
return ic return ic
} }

View File

@ -230,7 +230,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
atomic.AddInt64(&success, 1) atomic.AddInt64(&success, 1)
log.With("err", err).Infow("item not found", clues.InErr(err).Slice()...) log.With("err", err).Infow("item not found", clues.InErr(err).Slice()...)
} else { } else {
errs.AddRecoverable(clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation))
} }
return return
@ -238,7 +238,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
data, err := col.items.Serialize(ctx, item, user, id) data, err := col.items.Serialize(ctx, item, user, id)
if err != nil { if err != nil {
errs.AddRecoverable(clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation))
return return
} }

View File

@ -403,7 +403,7 @@ func (cr *containerResolver) populatePaths(
_, err := cr.idToPath(ctx, ptr.Val(f.GetId()), 0) _, err := cr.idToPath(ctx, ptr.Val(f.GetId()), 0)
if err != nil { if err != nil {
err = clues.Wrap(err, "populating path") err = clues.Wrap(err, "populating path")
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
lastErr = err lastErr = err
} }
} }

View File

@ -64,7 +64,7 @@ func ConsumeRestoreCollections(
handler, ok := handlers[category] handler, ok := handlers[category]
if !ok { if !ok {
el.AddRecoverable(clues.New("unsupported restore path category").WithClues(ictx)) el.AddRecoverable(ctx, clues.New("unsupported restore path category").WithClues(ictx))
continue continue
} }
@ -82,7 +82,7 @@ func ConsumeRestoreCollections(
isNewCache, isNewCache,
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -107,7 +107,7 @@ func ConsumeRestoreCollections(
break break
} }
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
} }
} }
@ -166,7 +166,7 @@ func restoreCollection(
_, err := buf.ReadFrom(itemData.ToReader()) _, err := buf.ReadFrom(itemData.ToReader())
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "reading item bytes").WithClues(ictx)) el.AddRecoverable(ctx, clues.Wrap(err, "reading item bytes").WithClues(ictx))
continue continue
} }
@ -174,7 +174,7 @@ func restoreCollection(
info, err := ir.restore(ictx, body, userID, destinationID, errs) info, err := ir.restore(ictx, body, userID, destinationID, errs)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -185,7 +185,7 @@ func restoreCollection(
// destination folder, then the restore path no longer matches the fullPath. // destination folder, then the restore path no longer matches the fullPath.
itemPath, err := fullPath.AppendItem(itemData.UUID()) itemPath, err := fullPath.AppendItem(itemData.UUID())
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "adding item to collection path").WithClues(ctx)) el.AddRecoverable(ctx, clues.Wrap(err, "adding item to collection path").WithClues(ctx))
continue continue
} }
@ -343,7 +343,7 @@ func uploadAttachments(
continue continue
} }
el.AddRecoverable(clues.Wrap(err, "uploading mail attachment").WithClues(ctx)) el.AddRecoverable(ctx, clues.Wrap(err, "uploading mail attachment").WithClues(ctx))
} }
} }

View File

@ -83,7 +83,7 @@ func BaseCollections(
if err != nil { if err != nil {
// Shouldn't happen. // Shouldn't happen.
err = clues.Wrap(err, "making path").WithClues(ictx) err = clues.Wrap(err, "making path").WithClues(ictx)
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
lastErr = err lastErr = err
continue continue

View File

@ -77,7 +77,7 @@ func ProduceBackupCollections(
odcs, canUsePreviousBackup, err = nc.Get(ctx, metadata, ssmb, errs) odcs, canUsePreviousBackup, err = nc.Get(ctx, metadata, ssmb, errs)
if err != nil { if err != nil {
el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
} }
categories[scope.Category().PathType()] = struct{}{} categories[scope.Category().PathType()] = struct{}{}

View File

@ -271,14 +271,14 @@ func (oc *Collection) getDriveItemContent(
if err != nil { if err != nil {
if clues.HasLabel(err, graph.LabelsMalware) || (item != nil && item.GetMalware() != nil) { if clues.HasLabel(err, graph.LabelsMalware) || (item != nil && item.GetMalware() != nil) {
logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipMalware).Info("item flagged as malware") logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipMalware).Info("item flagged as malware")
el.AddSkip(fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, graph.ItemInfo(item))) el.AddSkip(ctx, fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, graph.ItemInfo(item)))
return nil, clues.Wrap(err, "malware item").Label(graph.LabelsSkippable) return nil, clues.Wrap(err, "malware item").Label(graph.LabelsSkippable)
} }
if clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) || graph.IsErrDeletedInFlight(err) { if clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) || graph.IsErrDeletedInFlight(err) {
logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipNotFound).Info("item not found") logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipNotFound).Info("item not found")
el.AddSkip(fault.FileSkip(fault.SkipNotFound, driveID, itemID, itemName, graph.ItemInfo(item))) el.AddSkip(ctx, fault.FileSkip(fault.SkipNotFound, driveID, itemID, itemName, graph.ItemInfo(item)))
return nil, clues.Wrap(err, "deleted item").Label(graph.LabelsSkippable) return nil, clues.Wrap(err, "deleted item").Label(graph.LabelsSkippable)
} }
@ -293,13 +293,13 @@ func (oc *Collection) getDriveItemContent(
// restore, or we have to handle it separately by somehow // restore, or we have to handle it separately by somehow
// deleting the entire collection. // deleting the entire collection.
logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipBigOneNote).Info("max OneNote file size exceeded") logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipBigOneNote).Info("max OneNote file size exceeded")
el.AddSkip(fault.FileSkip(fault.SkipBigOneNote, driveID, itemID, itemName, graph.ItemInfo(item))) el.AddSkip(ctx, fault.FileSkip(fault.SkipBigOneNote, driveID, itemID, itemName, graph.ItemInfo(item)))
return nil, clues.Wrap(err, "max oneNote item").Label(graph.LabelsSkippable) return nil, clues.Wrap(err, "max oneNote item").Label(graph.LabelsSkippable)
} }
logger.CtxErr(ctx, err).Error("downloading item") logger.CtxErr(ctx, err).Error("downloading item")
el.AddRecoverable(clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
// return err, not el.Err(), because the lazy reader needs to communicate to // return err, not el.Err(), because the lazy reader needs to communicate to
// the data consumer that this item is unreadable, regardless of the fault state. // the data consumer that this item is unreadable, regardless of the fault state.
@ -431,7 +431,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {
// Fetch metadata for the file // Fetch metadata for the file
itemMeta, itemMetaSize, err = downloadItemMeta(ctx, oc.handler, oc.driveID, item) itemMeta, itemMetaSize, err = downloadItemMeta(ctx, oc.handler, oc.driveID, item)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation))
return return
} }

View File

@ -663,7 +663,7 @@ func (c *Collections) UpdateCollections(
skip = fault.ContainerSkip(fault.SkipMalware, driveID, itemID, itemName, addtl) skip = fault.ContainerSkip(fault.SkipMalware, driveID, itemID, itemName, addtl)
} }
errs.AddSkip(skip) errs.AddSkip(ctx, skip)
logger.Ctx(ctx).Infow("malware detected", "item_details", addtl) logger.Ctx(ctx).Infow("malware detected", "item_details", addtl)
continue continue
@ -689,7 +689,7 @@ func (c *Collections) UpdateCollections(
collectionPath, err := c.getCollectionPath(driveID, item) collectionPath, err := c.getCollectionPath(driveID, item)
if err != nil { if err != nil {
el.AddRecoverable(clues.Stack(err). el.AddRecoverable(ctx, clues.Stack(err).
WithClues(ictx). WithClues(ictx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
@ -711,7 +711,7 @@ func (c *Collections) UpdateCollections(
if ok { if ok {
prevPath, err = path.FromDataLayerPath(prevPathStr, false) prevPath, err = path.FromDataLayerPath(prevPathStr, false)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "invalid previous path"). el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path").
WithClues(ictx). WithClues(ictx).
With("path_string", prevPathStr)) With("path_string", prevPathStr))
} }

View File

@ -1157,7 +1157,7 @@ func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Str
ic := make(chan data.Stream) ic := make(chan data.Stream)
defer close(ic) defer close(ic)
errs.AddRecoverable(assert.AnError) errs.AddRecoverable(ctx, assert.AnError)
return ic return ic
} }

View File

@ -238,7 +238,7 @@ func GetAllFolders(
"", "",
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "enumerating items in drive")) el.AddRecoverable(ctx, clues.Wrap(err, "enumerating items in drive"))
} }
} }

View File

@ -113,7 +113,7 @@ func RestoreCollections(
opts.RestorePermissions, opts.RestorePermissions,
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
} }
restoreMetrics = support.CombineMetrics(restoreMetrics, metrics) restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)
@ -273,7 +273,7 @@ func ProduceRestoreCollection(
itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ictx)) el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ictx))
return return
} }
@ -297,7 +297,7 @@ func ProduceRestoreCollection(
} }
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "restoring item")) el.AddRecoverable(ctx, clues.Wrap(err, "restoring item"))
return return
} }

View File

@ -70,7 +70,7 @@ func GetSitePages(
page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts) page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts)
if err != nil { if err != nil {
el.AddRecoverable(graph.Wrap(ctx, err, "fetching page")) el.AddRecoverable(ctx, graph.Wrap(ctx, err, "fetching page"))
return return
} }

View File

@ -80,7 +80,7 @@ func ProduceBackupCollections(
ctrlOpts, ctrlOpts,
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -101,7 +101,7 @@ func ProduceBackupCollections(
ctrlOpts, ctrlOpts,
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -115,7 +115,7 @@ func ProduceBackupCollections(
ctrlOpts, ctrlOpts,
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -184,7 +184,7 @@ func collectLists(
false, false,
tuple.name) tuple.name)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "creating list collection path").WithClues(ctx)) el.AddRecoverable(ctx, clues.Wrap(err, "creating list collection path").WithClues(ctx))
} }
collection := NewCollection( collection := NewCollection(
@ -284,7 +284,7 @@ func collectPages(
false, false,
tuple.Name) tuple.Name)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "creating page collection path").WithClues(ctx)) el.AddRecoverable(ctx, clues.Wrap(err, "creating page collection path").WithClues(ctx))
} }
collection := NewCollection( collection := NewCollection(

View File

@ -239,7 +239,7 @@ func (sc *Collection) retrieveLists(
byteArray, err := serializeContent(ctx, wtr, lst) byteArray, err := serializeContent(ctx, wtr, lst)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -308,7 +308,7 @@ func (sc *Collection) retrievePages(
byteArray, err := serializeContent(ctx, wtr, pg) byteArray, err := serializeContent(ctx, wtr, pg)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
continue continue
} }

View File

@ -130,13 +130,13 @@ func loadSiteLists(
entry, err = gs.Client().Sites().BySiteId(siteID).Lists().ByListId(id).Get(ctx, nil) entry, err = gs.Client().Sites().BySiteId(siteID).Lists().ByListId(id).Get(ctx, nil)
if err != nil { if err != nil {
el.AddRecoverable(graph.Wrap(ctx, err, "getting site list")) el.AddRecoverable(ctx, graph.Wrap(ctx, err, "getting site list"))
return return
} }
cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id, errs) cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id, errs)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "getting list contents")) el.AddRecoverable(ctx, clues.Wrap(err, "getting list contents"))
return return
} }
@ -220,7 +220,7 @@ func fetchListItems(
fields, err := newPrefix.Fields().Get(ctx, nil) fields, err := newPrefix.Fields().Get(ctx, nil)
if err != nil { if err != nil {
el.AddRecoverable(graph.Wrap(ctx, err, "getting list fields")) el.AddRecoverable(ctx, graph.Wrap(ctx, err, "getting list fields"))
continue continue
} }
@ -336,7 +336,7 @@ func fetchContentTypes(
links, err := fetchColumnLinks(ctx, gs, siteID, listID, id) links, err := fetchColumnLinks(ctx, gs, siteID, listID, id)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -344,7 +344,7 @@ func fetchContentTypes(
cs, err := fetchColumns(ctx, gs, siteID, listID, id) cs, err := fetchColumns(ctx, gs, siteID, listID, id)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }

View File

@ -101,7 +101,7 @@ func ConsumeRestoreCollections(
restoreMetrics = support.CombineMetrics(restoreMetrics, metrics) restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
} }
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
@ -238,7 +238,7 @@ func RestoreListCollection(
siteID, siteID,
restoreContainerName) restoreContainerName)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -246,7 +246,7 @@ func RestoreListCollection(
itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx))
continue continue
} }
@ -318,7 +318,7 @@ func RestorePageCollection(
siteID, siteID,
restoreContainerName) restoreContainerName)
if err != nil { if err != nil {
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
continue continue
} }
@ -326,7 +326,7 @@ func RestorePageCollection(
itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx))
continue continue
} }

View File

@ -1,6 +1,7 @@
package operations package operations
import ( import (
"context"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -22,13 +23,13 @@ func TestHelpersUnitSuite(t *testing.T) {
func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() { func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() {
table := []struct { table := []struct {
name string name string
errs func() *fault.Bus errs func(context.Context) *fault.Bus
opts control.Options opts control.Options
expectErr assert.ErrorAssertionFunc expectErr assert.ErrorAssertionFunc
}{ }{
{ {
name: "no errors", name: "no errors",
errs: func() *fault.Bus { errs: func(ctx context.Context) *fault.Bus {
return fault.New(false) return fault.New(false)
}, },
opts: control.Options{ opts: control.Options{
@ -38,7 +39,7 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() {
}, },
{ {
name: "already failed", name: "already failed",
errs: func() *fault.Bus { errs: func(ctx context.Context) *fault.Bus {
fn := fault.New(false) fn := fault.New(false)
fn.Fail(assert.AnError) fn.Fail(assert.AnError)
return fn return fn
@ -50,9 +51,9 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() {
}, },
{ {
name: "best effort", name: "best effort",
errs: func() *fault.Bus { errs: func(ctx context.Context) *fault.Bus {
fn := fault.New(false) fn := fault.New(false)
fn.AddRecoverable(assert.AnError) fn.AddRecoverable(ctx, assert.AnError)
return fn return fn
}, },
opts: control.Options{ opts: control.Options{
@ -62,9 +63,9 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() {
}, },
{ {
name: "recoverable errors produce hard fail", name: "recoverable errors produce hard fail",
errs: func() *fault.Bus { errs: func(ctx context.Context) *fault.Bus {
fn := fault.New(false) fn := fault.New(false)
fn.AddRecoverable(assert.AnError) fn.AddRecoverable(ctx, assert.AnError)
return fn return fn
}, },
opts: control.Options{ opts: control.Options{
@ -74,11 +75,11 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() {
}, },
{ {
name: "multiple recoverable errors produce hard fail", name: "multiple recoverable errors produce hard fail",
errs: func() *fault.Bus { errs: func(ctx context.Context) *fault.Bus {
fn := fault.New(false) fn := fault.New(false)
fn.AddRecoverable(assert.AnError) fn.AddRecoverable(ctx, assert.AnError)
fn.AddRecoverable(assert.AnError) fn.AddRecoverable(ctx, assert.AnError)
fn.AddRecoverable(assert.AnError) fn.AddRecoverable(ctx, assert.AnError)
return fn return fn
}, },
opts: control.Options{ opts: control.Options{
@ -94,7 +95,7 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() {
ctx, flush := tester.NewContext(t) ctx, flush := tester.NewContext(t)
defer flush() defer flush()
errs := test.errs() errs := test.errs(ctx)
finalizeErrorHandling(ctx, test.opts, errs, "test") finalizeErrorHandling(ctx, test.opts, errs, "test")
test.expectErr(t, errs.Failure()) test.expectErr(t, errs.Failure())

View File

@ -168,7 +168,7 @@ func GetPaths(
restorePaths, err := makeRestorePathsForEntry(ctx, backupVersion, ent) restorePaths, err := makeRestorePathsForEntry(ctx, backupVersion, ent)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "getting restore paths")) el.AddRecoverable(ctx, clues.Wrap(err, "getting restore paths"))
continue continue
} }

View File

@ -1,6 +1,7 @@
package streamstore package streamstore
import ( import (
"context"
"testing" "testing"
"github.com/alcionai/clues" "github.com/alcionai/clues"
@ -73,13 +74,13 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
table := []struct { table := []struct {
name string name string
deets func(*testing.T) *details.Details deets func(*testing.T) *details.Details
errs func() *fault.Errors errs func(context.Context) *fault.Errors
hasSnapID assert.ValueAssertionFunc hasSnapID assert.ValueAssertionFunc
}{ }{
{ {
name: "none", name: "none",
deets: func(*testing.T) *details.Details { return nil }, deets: func(*testing.T) *details.Details { return nil },
errs: func() *fault.Errors { return nil }, errs: func(context.Context) *fault.Errors { return nil },
hasSnapID: assert.Empty, hasSnapID: assert.Empty,
}, },
{ {
@ -98,18 +99,20 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
})) }))
return deetsBuilder.Details() return deetsBuilder.Details()
}, },
errs: func() *fault.Errors { return nil }, errs: func(context.Context) *fault.Errors { return nil },
hasSnapID: assert.NotEmpty, hasSnapID: assert.NotEmpty,
}, },
{ {
name: "errors", name: "errors",
deets: func(*testing.T) *details.Details { return nil }, deets: func(*testing.T) *details.Details { return nil },
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
bus := fault.New(false) bus := fault.New(false)
bus.Fail(clues.New("foo")) bus.Fail(clues.New("foo"))
bus.AddRecoverable(clues.New("bar")) bus.AddRecoverable(ctx, clues.New("bar"))
bus.AddRecoverable(fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) bus.AddRecoverable(
bus.AddSkip(fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) ctx,
fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"}))
bus.AddSkip(ctx, fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"}))
fe := bus.Errors() fe := bus.Errors()
return fe return fe
@ -133,12 +136,14 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
return deetsBuilder.Details() return deetsBuilder.Details()
}, },
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
bus := fault.New(false) bus := fault.New(false)
bus.Fail(clues.New("foo")) bus.Fail(clues.New("foo"))
bus.AddRecoverable(clues.New("bar")) bus.AddRecoverable(ctx, clues.New("bar"))
bus.AddRecoverable(fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) bus.AddRecoverable(
bus.AddSkip(fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) ctx,
fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"}))
bus.AddSkip(ctx, fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"}))
fe := bus.Errors() fe := bus.Errors()
return fe return fe
@ -164,7 +169,7 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
require.NoError(t, err) require.NoError(t, err)
} }
errs := test.errs() errs := test.errs(ctx)
if errs != nil { if errs != nil {
err = ss.Collect(ctx, FaultErrorsCollector(errs)) err = ss.Collect(ctx, FaultErrorsCollector(errs))
require.NoError(t, err) require.NoError(t, err)

View File

@ -1,6 +1,7 @@
package fault_test package fault_test
import ( import (
"context"
"fmt" "fmt"
"github.com/alcionai/clues" "github.com/alcionai/clues"
@ -15,6 +16,8 @@ import (
var ( var (
ctrl any ctrl any
items = []string{} items = []string{}
//nolint:forbidigo
ctx = context.Background()
) )
type mockController struct { type mockController struct {
@ -133,7 +136,7 @@ func ExampleBus_AddRecoverable() {
// to aggregate the error using fault. // to aggregate the error using fault.
// Side note: technically, you should use a local bus // Side note: technically, you should use a local bus
// here (see below) instead of errs. // here (see below) instead of errs.
errs.AddRecoverable(err) errs.AddRecoverable(ctx, err)
} }
} }
@ -150,7 +153,7 @@ func ExampleBus_AddRecoverable() {
} }
if err := getIthItem(i); err != nil { if err := getIthItem(i); err != nil {
errs.AddRecoverable(err) errs.AddRecoverable(ctx, err)
} }
} }
} }
@ -175,13 +178,13 @@ func ExampleBus_Failure() {
// If Failure() is nil, then you can assume the operation completed. // If Failure() is nil, then you can assume the operation completed.
// A complete operation is not necessarily an error-free operation. // A complete operation is not necessarily an error-free operation.
// Recoverable errors may still have been added using AddRecoverable(err). // Recoverable errors may still have been added using AddRecoverable(ctx, err).
// Make sure you check both. // Make sure you check both.
// If failFast is set to true, then the first recoerable error Added gets // If failFast is set to true, then the first recoerable error Added gets
// promoted to the Err() position. // promoted to the Err() position.
errs = fault.New(true) errs = fault.New(true)
errs.AddRecoverable(clues.New("not catastrophic, but still becomes the Failure()")) errs.AddRecoverable(ctx, clues.New("not catastrophic, but still becomes the Failure()"))
err = errs.Failure() err = errs.Failure()
fmt.Println(err) fmt.Println(err)
@ -194,8 +197,8 @@ func ExampleBus_Failure() {
// recover from and continue. // recover from and continue.
func ExampleErrors_Recovered() { func ExampleErrors_Recovered() {
errs := fault.New(false) errs := fault.New(false)
errs.AddRecoverable(clues.New("not catastrophic")) errs.AddRecoverable(ctx, clues.New("not catastrophic"))
errs.AddRecoverable(clues.New("something unwanted")) errs.AddRecoverable(ctx, clues.New("something unwanted"))
// Recovered() gets the slice of all recoverable errors added during // Recovered() gets the slice of all recoverable errors added during
// the run, but which did not cause a failure. // the run, but which did not cause a failure.
@ -247,12 +250,12 @@ func ExampleBus_Local() {
} }
if err := getIthItem(i); err != nil { if err := getIthItem(i); err != nil {
// instead of calling errs.AddRecoverable(err), we call the // instead of calling errs.AddRecoverable(ctx, err), we call the
// local bus's Add method. The error will still get // local bus's Add method. The error will still get
// added to the errs.Recovered() set. But if this err // added to the errs.Recovered() set. But if this err
// causes the run to fail, only this local bus treats // causes the run to fail, only this local bus treats
// it as the causal failure. // it as the causal failure.
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
} }
} }
@ -330,7 +333,7 @@ func Example_e2e() {
if err := storer(d); err != nil { if err := storer(d); err != nil {
// Since we're at the top of the iteration, we need // Since we're at the top of the iteration, we need
// to add each error to the fault.localBus struct. // to add each error to the fault.localBus struct.
el.AddRecoverable(err) el.AddRecoverable(ctx, err)
} }
} }
@ -383,7 +386,7 @@ func ExampleErrors_Failure_return() {
} }
if err := dependency.do(); err != nil { if err := dependency.do(); err != nil {
errs.AddRecoverable(clues.Wrap(err, "recoverable")) errs.AddRecoverable(ctx, clues.Wrap(err, "recoverable"))
} }
} }
@ -426,7 +429,7 @@ func ExampleBus_AddSkip() {
// over, instead of error out. An initial case is when Graph API identifies // over, instead of error out. An initial case is when Graph API identifies
// a file as containing malware. We can't download the file: it'll always // a file as containing malware. We can't download the file: it'll always
// error. Our only option is to skip it. // error. Our only option is to skip it.
errs.AddSkip(fault.FileSkip( errs.AddSkip(ctx, fault.FileSkip(
fault.SkipMalware, fault.SkipMalware,
"deduplication-namespace", "deduplication-namespace",
"file-id", "file-id",

View File

@ -12,6 +12,7 @@ import (
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/pkg/logger"
) )
type Bus struct { type Bus struct {
@ -118,27 +119,45 @@ func (e *Bus) setFailure(err error) *Bus {
// //
// TODO: nil return, not Bus, since we don't want people to return // TODO: nil return, not Bus, since we don't want people to return
// from errors.AddRecoverable(). // from errors.AddRecoverable().
func (e *Bus) AddRecoverable(err error) *Bus { func (e *Bus) AddRecoverable(ctx context.Context, err error) {
if err == nil { if err == nil {
return e return
} }
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
return e.addRecoverableErr(err) e.logAndAddRecoverable(ctx, err, 1)
}
// logs the error and adds it to the bus. If the error is a failure,
// it gets logged at an Error level. Otherwise logs an Info.
func (e *Bus) logAndAddRecoverable(ctx context.Context, err error, skip int) {
log := logger.CtxErrStack(ctx, err, skip+1)
isFail := e.addRecoverableErr(err)
if isFail {
log.Error("recoverable error")
} else {
log.Info("recoverable error")
}
} }
// addErr handles adding errors to errors.errs. Sync locking // addErr handles adding errors to errors.errs. Sync locking
// gets handled upstream of this call. // gets handled upstream of this call. Returns true if the
func (e *Bus) addRecoverableErr(err error) *Bus { // error is a failure, false otherwise.
func (e *Bus) addRecoverableErr(err error) bool {
var isFail bool
if e.failure == nil && e.failFast { if e.failure == nil && e.failFast {
e.setFailure(err) e.setFailure(err)
isFail = true
} }
e.recoverable = append(e.recoverable, err) e.recoverable = append(e.recoverable, err)
return e return isFail
} }
// AddSkip appends a record of a Skipped item to the fault bus. // AddSkip appends a record of a Skipped item to the fault bus.
@ -151,15 +170,23 @@ func (e *Bus) addRecoverableErr(err error) *Bus {
// 2. Skipping avoids a permanent and consistent failure. If // 2. Skipping avoids a permanent and consistent failure. If
// the underlying reason is transient or otherwise recoverable, // the underlying reason is transient or otherwise recoverable,
// the item should not be skipped. // the item should not be skipped.
func (e *Bus) AddSkip(s *Skipped) *Bus { func (e *Bus) AddSkip(ctx context.Context, s *Skipped) {
if s == nil { if s == nil {
return e return
} }
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
return e.addSkip(s) e.logAndAddSkip(ctx, s, 1)
}
// logs the error and adds a skipped item.
func (e *Bus) logAndAddSkip(ctx context.Context, s *Skipped, skip int) {
logger.CtxStack(ctx, skip+1).
With("skipped", s).
Info("recoverable error")
e.addSkip(s)
} }
func (e *Bus) addSkip(s *Skipped) *Bus { func (e *Bus) addSkip(s *Skipped) *Bus {
@ -344,7 +371,7 @@ type localBus struct {
current error current error
} }
func (e *localBus) AddRecoverable(err error) { func (e *localBus) AddRecoverable(ctx context.Context, err error) {
if err == nil { if err == nil {
return return
} }
@ -356,7 +383,7 @@ func (e *localBus) AddRecoverable(err error) {
e.current = err e.current = err
} }
e.bus.AddRecoverable(err) e.bus.logAndAddRecoverable(ctx, err, 1)
} }
// AddSkip appends a record of a Skipped item to the local bus. // AddSkip appends a record of a Skipped item to the local bus.
@ -369,7 +396,7 @@ func (e *localBus) AddRecoverable(err error) {
// 2. Skipping avoids a permanent and consistent failure. If // 2. Skipping avoids a permanent and consistent failure. If
// the underlying reason is transient or otherwise recoverable, // the underlying reason is transient or otherwise recoverable,
// the item should not be skipped. // the item should not be skipped.
func (e *localBus) AddSkip(s *Skipped) { func (e *localBus) AddSkip(ctx context.Context, s *Skipped) {
if s == nil { if s == nil {
return return
} }
@ -377,7 +404,7 @@ func (e *localBus) AddSkip(s *Skipped) {
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
e.bus.AddSkip(s) e.bus.logAndAddSkip(ctx, s, 1)
} }
// Failure returns the failure that happened within the local bus. // Failure returns the failure that happened within the local bus.

View File

@ -1,6 +1,7 @@
package fault_test package fault_test
import ( import (
"context"
"encoding/json" "encoding/json"
"testing" "testing"
@ -75,6 +76,9 @@ func (suite *FaultErrorsUnitSuite) TestErr() {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
n := fault.New(test.failFast) n := fault.New(test.failFast)
require.NotNil(t, n) require.NotNil(t, n)
require.NoError(t, n.Failure(), clues.ToCore(n.Failure())) require.NoError(t, n.Failure(), clues.ToCore(n.Failure()))
@ -83,8 +87,7 @@ func (suite *FaultErrorsUnitSuite) TestErr() {
e := n.Fail(test.fail) e := n.Fail(test.fail)
require.NotNil(t, e) require.NotNil(t, e)
e = n.AddRecoverable(test.add) n.AddRecoverable(ctx, test.add)
require.NotNil(t, e)
test.expect(t, n.Failure()) test.expect(t, n.Failure())
}) })
@ -152,14 +155,16 @@ func (suite *FaultErrorsUnitSuite) TestErrs() {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
n := fault.New(test.failFast) n := fault.New(test.failFast)
require.NotNil(t, n) require.NotNil(t, n)
e := n.Fail(test.fail) e := n.Fail(test.fail)
require.NotNil(t, e) require.NotNil(t, e)
e = n.AddRecoverable(test.add) n.AddRecoverable(ctx, test.add)
require.NotNil(t, e)
test.expect(t, n.Recovered()) test.expect(t, n.Recovered())
}) })
@ -169,14 +174,17 @@ func (suite *FaultErrorsUnitSuite) TestErrs() {
func (suite *FaultErrorsUnitSuite) TestAdd() { func (suite *FaultErrorsUnitSuite) TestAdd() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
n := fault.New(true) n := fault.New(true)
require.NotNil(t, n) require.NotNil(t, n)
n.AddRecoverable(assert.AnError) n.AddRecoverable(ctx, assert.AnError)
assert.Error(t, n.Failure()) assert.Error(t, n.Failure())
assert.Len(t, n.Recovered(), 1) assert.Len(t, n.Recovered(), 1)
n.AddRecoverable(assert.AnError) n.AddRecoverable(ctx, assert.AnError)
assert.Error(t, n.Failure()) assert.Error(t, n.Failure())
assert.Len(t, n.Recovered(), 2) assert.Len(t, n.Recovered(), 2)
} }
@ -184,29 +192,35 @@ func (suite *FaultErrorsUnitSuite) TestAdd() {
func (suite *FaultErrorsUnitSuite) TestAddSkip() { func (suite *FaultErrorsUnitSuite) TestAddSkip() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
n := fault.New(true) n := fault.New(true)
require.NotNil(t, n) require.NotNil(t, n)
n.Fail(assert.AnError) n.Fail(assert.AnError)
assert.Len(t, n.Skipped(), 0) assert.Len(t, n.Skipped(), 0)
n.AddRecoverable(assert.AnError) n.AddRecoverable(ctx, assert.AnError)
assert.Len(t, n.Skipped(), 0) assert.Len(t, n.Skipped(), 0)
n.AddSkip(fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil)) n.AddSkip(ctx, fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil))
assert.Len(t, n.Skipped(), 1) assert.Len(t, n.Skipped(), 1)
} }
func (suite *FaultErrorsUnitSuite) TestErrors() { func (suite *FaultErrorsUnitSuite) TestErrors() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
// not fail-fast // not fail-fast
n := fault.New(false) n := fault.New(false)
require.NotNil(t, n) require.NotNil(t, n)
n.Fail(clues.New("fail")) n.Fail(clues.New("fail"))
n.AddRecoverable(clues.New("1")) n.AddRecoverable(ctx, clues.New("1"))
n.AddRecoverable(clues.New("2")) n.AddRecoverable(ctx, clues.New("2"))
d := n.Errors() d := n.Errors()
assert.Equal(t, clues.ToCore(n.Failure()), d.Failure) assert.Equal(t, clues.ToCore(n.Failure()), d.Failure)
@ -218,8 +232,8 @@ func (suite *FaultErrorsUnitSuite) TestErrors() {
require.NotNil(t, n) require.NotNil(t, n)
n.Fail(clues.New("fail")) n.Fail(clues.New("fail"))
n.AddRecoverable(clues.New("1")) n.AddRecoverable(ctx, clues.New("1"))
n.AddRecoverable(clues.New("2")) n.AddRecoverable(ctx, clues.New("2"))
d = n.Errors() d = n.Errors()
assert.Equal(t, clues.ToCore(n.Failure()), d.Failure) assert.Equal(t, clues.ToCore(n.Failure()), d.Failure)
@ -234,13 +248,13 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
table := []struct { table := []struct {
name string name string
errs func() *fault.Errors errs func(context.Context) *fault.Errors
expectItems []fault.Item expectItems []fault.Item
expectRecoverable []*clues.ErrCore expectRecoverable []*clues.ErrCore
}{ }{
{ {
name: "no errors", name: "no errors",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
return fault.New(false).Errors() return fault.New(false).Errors()
}, },
expectItems: []fault.Item{}, expectItems: []fault.Item{},
@ -248,10 +262,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "no items", name: "no items",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(ae) b.Fail(ae)
b.AddRecoverable(ae) b.AddRecoverable(ctx, ae)
return b.Errors() return b.Errors()
}, },
@ -260,10 +274,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "failure item", name: "failure item",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl))
b.AddRecoverable(ae) b.AddRecoverable(ctx, ae)
return b.Errors() return b.Errors()
}, },
@ -272,10 +286,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "recoverable item", name: "recoverable item",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(ae) b.Fail(ae)
b.AddRecoverable(fault.OwnerErr(ae, "ns", "id", "name", addtl)) b.AddRecoverable(ctx, fault.OwnerErr(ae, "ns", "id", "name", addtl))
return b.Errors() return b.Errors()
}, },
@ -284,10 +298,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "two items", name: "two items",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(fault.OwnerErr(ae, "ns", "oid", "name", addtl)) b.Fail(fault.OwnerErr(ae, "ns", "oid", "name", addtl))
b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name", addtl))
return b.Errors() return b.Errors()
}, },
@ -299,10 +313,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "two items - diff namespace same id", name: "two items - diff namespace same id",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl))
b.AddRecoverable(fault.FileErr(ae, "ns2", "id", "name", addtl)) b.AddRecoverable(ctx, fault.FileErr(ae, "ns2", "id", "name", addtl))
return b.Errors() return b.Errors()
}, },
@ -314,10 +328,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "duplicate items - failure priority", name: "duplicate items - failure priority",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl))
b.AddRecoverable(fault.FileErr(ae, "ns", "id", "name", addtl)) b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "id", "name", addtl))
return b.Errors() return b.Errors()
}, },
@ -328,11 +342,11 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "duplicate items - last recoverable priority", name: "duplicate items - last recoverable priority",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(ae) b.Fail(ae)
b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name", addtl))
b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name2", addtl)) b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name2", addtl))
return b.Errors() return b.Errors()
}, },
@ -343,11 +357,11 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
}, },
{ {
name: "recoverable item and non-items", name: "recoverable item and non-items",
errs: func() *fault.Errors { errs: func(ctx context.Context) *fault.Errors {
b := fault.New(false) b := fault.New(false)
b.Fail(ae) b.Fail(ae)
b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name", addtl))
b.AddRecoverable(ae) b.AddRecoverable(ctx, ae)
return b.Errors() return b.Errors()
}, },
@ -360,7 +374,11 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
for _, test := range table { for _, test := range table {
suite.Run(test.name, func() { suite.Run(test.name, func() {
t := suite.T() t := suite.T()
fe := test.errs()
ctx, flush := tester.NewContext(t)
defer flush()
fe := test.errs(ctx)
assert.ElementsMatch(t, test.expectItems, fe.Items) assert.ElementsMatch(t, test.expectItems, fe.Items)
require.Equal(t, test.expectRecoverable, fe.Recovered) require.Equal(t, test.expectRecoverable, fe.Recovered)
@ -378,12 +396,15 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() {
func (suite *FaultErrorsUnitSuite) TestMarshalUnmarshal() { func (suite *FaultErrorsUnitSuite) TestMarshalUnmarshal() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
// not fail-fast // not fail-fast
n := fault.New(false) n := fault.New(false)
require.NotNil(t, n) require.NotNil(t, n)
n.AddRecoverable(clues.New("1")) n.AddRecoverable(ctx, clues.New("1"))
n.AddRecoverable(clues.New("2")) n.AddRecoverable(ctx, clues.New("2"))
bs, err := json.Marshal(n.Errors()) bs, err := json.Marshal(n.Errors())
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
@ -419,13 +440,16 @@ func (suite *FaultErrorsUnitSuite) TestUnmarshalLegacy() {
func (suite *FaultErrorsUnitSuite) TestTracker() { func (suite *FaultErrorsUnitSuite) TestTracker() {
t := suite.T() t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
eb := fault.New(false) eb := fault.New(false)
lb := eb.Local() lb := eb.Local()
assert.NoError(t, lb.Failure(), clues.ToCore(lb.Failure())) assert.NoError(t, lb.Failure(), clues.ToCore(lb.Failure()))
assert.Empty(t, eb.Recovered()) assert.Empty(t, eb.Recovered())
lb.AddRecoverable(assert.AnError) lb.AddRecoverable(ctx, assert.AnError)
assert.NoError(t, lb.Failure(), clues.ToCore(lb.Failure())) assert.NoError(t, lb.Failure(), clues.ToCore(lb.Failure()))
assert.NoError(t, eb.Failure(), clues.ToCore(eb.Failure())) assert.NoError(t, eb.Failure(), clues.ToCore(eb.Failure()))
assert.NotEmpty(t, eb.Recovered()) assert.NotEmpty(t, eb.Recovered())
@ -436,7 +460,7 @@ func (suite *FaultErrorsUnitSuite) TestTracker() {
assert.NoError(t, lbt.Failure(), clues.ToCore(lbt.Failure())) assert.NoError(t, lbt.Failure(), clues.ToCore(lbt.Failure()))
assert.Empty(t, ebt.Recovered()) assert.Empty(t, ebt.Recovered())
lbt.AddRecoverable(assert.AnError) lbt.AddRecoverable(ctx, assert.AnError)
assert.Error(t, lbt.Failure()) assert.Error(t, lbt.Failure())
assert.Error(t, ebt.Failure()) assert.Error(t, ebt.Failure())
assert.NotEmpty(t, ebt.Recovered()) assert.NotEmpty(t, ebt.Recovered())

View File

@ -437,6 +437,14 @@ func Ctx(ctx context.Context) *zap.SugaredLogger {
return l.(*zap.SugaredLogger).With(clues.In(ctx).Slice()...) return l.(*zap.SugaredLogger).With(clues.In(ctx).Slice()...)
} }
// CtxStack retrieves the logger embedded in the context, and adds the
// stacktrace to the log info.
// If skip is non-zero, it skips the stack calls starting from the
// first. Skip always adds +1 to account for this wrapper.
func CtxStack(ctx context.Context, skip int) *zap.SugaredLogger {
return Ctx(ctx).With(zap.StackSkip("trace", skip+1))
}
// CtxErr retrieves the logger embedded in the context // CtxErr retrieves the logger embedded in the context
// and packs all of the structured data in the error inside it. // and packs all of the structured data in the error inside it.
func CtxErr(ctx context.Context, err error) *zap.SugaredLogger { func CtxErr(ctx context.Context, err error) *zap.SugaredLogger {
@ -447,6 +455,19 @@ func CtxErr(ctx context.Context, err error) *zap.SugaredLogger {
With(clues.InErr(err).Slice()...) With(clues.InErr(err).Slice()...)
} }
// CtxErrStack retrieves the logger embedded in the context
// and packs all of the structured data in the error inside it.
// If skip is non-zero, it skips the stack calls starting from the
// first. Skip always adds +1 to account for this wrapper.
func CtxErrStack(ctx context.Context, err error, skip int) *zap.SugaredLogger {
return Ctx(ctx).
With(
"error", err,
"error_labels", clues.Labels(err)).
With(zap.StackSkip("trace", skip+1)).
With(clues.InErr(err).Slice()...)
}
// Flush writes out all buffered logs. // Flush writes out all buffered logs.
func Flush(ctx context.Context) { func Flush(ctx context.Context) {
_ = Ctx(ctx).Sync() _ = Ctx(ctx).Sync()

View File

@ -402,7 +402,7 @@ func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup
b, err := sw.GetBackup(ictx, model.StableID(id)) b, err := sw.GetBackup(ictx, model.StableID(id))
if err != nil { if err != nil {
errs.AddRecoverable(errWrapper(err)) errs.AddRecoverable(ctx, errWrapper(err))
} }
bups = append(bups, b) bups = append(bups, b)

View File

@ -368,7 +368,7 @@ func reduce[T scopeT, C categoryT](
repoPath, err := path.FromDataLayerPath(ent.RepoRef, true) repoPath, err := path.FromDataLayerPath(ent.RepoRef, true)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "transforming repoRef to path").WithClues(ictx)) el.AddRecoverable(ctx, clues.Wrap(err, "transforming repoRef to path").WithClues(ictx))
continue continue
} }
@ -391,7 +391,7 @@ func reduce[T scopeT, C categoryT](
pv, err := dc.pathValues(repoPath, *ent, s.Cfg) pv, err := dc.pathValues(repoPath, *ent, s.Cfg)
if err != nil { if err != nil {
el.AddRecoverable(clues.Wrap(err, "getting path values").WithClues(ictx)) el.AddRecoverable(ctx, clues.Wrap(err, "getting path values").WithClues(ictx))
continue continue
} }

View File

@ -183,7 +183,7 @@ func (c Contacts) EnumerateContainers(
} }
if err := graph.CheckIDNameAndParentFolderID(fold); err != nil { if err := graph.CheckIDNameAndParentFolderID(fold); err != nil {
errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -194,7 +194,7 @@ func (c Contacts) EnumerateContainers(
temp := graph.NewCacheFolder(fold, nil, nil) temp := graph.NewCacheFolder(fold, nil, nil)
if err := fn(&temp); err != nil { if err := fn(&temp); err != nil {
errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
} }

View File

@ -236,7 +236,7 @@ func (c Events) EnumerateContainers(
cd := CalendarDisplayable{Calendarable: cal} cd := CalendarDisplayable{Calendarable: cal}
if err := graph.CheckIDAndName(cd); err != nil { if err := graph.CheckIDAndName(cd); err != nil {
errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -250,7 +250,7 @@ func (c Events) EnumerateContainers(
path.Builder{}.Append(ptr.Val(cd.GetId())), // storage path path.Builder{}.Append(ptr.Val(cd.GetId())), // storage path
path.Builder{}.Append(ptr.Val(cd.GetDisplayName()))) // display location path.Builder{}.Append(ptr.Val(cd.GetDisplayName()))) // display location
if err := fn(&temp); err != nil { if err := fn(&temp); err != nil {
errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
} }

View File

@ -264,7 +264,7 @@ func (c Mail) EnumerateContainers(
} }
if err := graph.CheckIDNameAndParentFolderID(fold); err != nil { if err := graph.CheckIDNameAndParentFolderID(fold); err != nil {
errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -275,7 +275,7 @@ func (c Mail) EnumerateContainers(
temp := graph.NewCacheFolder(fold, nil, nil) temp := graph.NewCacheFolder(fold, nil, nil)
if err := fn(&temp); err != nil { if err := fn(&temp); err != nil {
errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) errs.AddRecoverable(ctx, graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
} }

View File

@ -72,7 +72,7 @@ func (c Sites) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Siteable,
} }
if err != nil { if err != nil {
el.AddRecoverable(graph.Wrap(ctx, err, "validating site")) el.AddRecoverable(ctx, graph.Wrap(ctx, err, "validating site"))
return true return true
} }

View File

@ -97,7 +97,7 @@ func (c Users) GetAll(
err := validateUser(item) err := validateUser(item)
if err != nil { if err != nil {
el.AddRecoverable(graph.Wrap(ctx, err, "validating user")) el.AddRecoverable(ctx, graph.Wrap(ctx, err, "validating user"))
} else { } else {
us = append(us, item) us = append(us, item)
} }