diff --git a/src/internal/kopia/data_collection.go b/src/internal/kopia/data_collection.go index 4cc53e871..64d5f1a17 100644 --- a/src/internal/kopia/data_collection.go +++ b/src/internal/kopia/data_collection.go @@ -42,7 +42,7 @@ func (kdc *kopiaDataCollection) Items( for _, item := range kdc.items { s, err := kdc.FetchItemByName(ctx, item) if err != nil { - el.AddRecoverable(clues.Wrap(err, "fetching item"). + el.AddRecoverable(ctx, clues.Wrap(err, "fetching item"). WithClues(ctx). Label(fault.LabelForceNoBackupCreation)) diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index 4a610c892..5c8d19950 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -133,6 +133,12 @@ type itemDetails struct { } type corsoProgress struct { + // this is an unwanted hack. We can't extend the kopia interface + // funcs to pass through a context. This is the second best way to + // get an at least partially formed context into funcs that need it + // for logging and other purposes. + ctx context.Context + snapshotfs.UploadProgress pending map[string]*itemDetails deets *details.Builder @@ -183,11 +189,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) { // never had to materialize their details in-memory. if d.info == nil { if d.prevPath == nil { - cp.errs.AddRecoverable(clues.New("item sourced from previous backup with no previous path"). + cp.errs.AddRecoverable(cp.ctx, clues.New("item sourced from previous backup with no previous path"). With( "service", d.repoPath.Service().String(), - "category", d.repoPath.Category().String(), - ). + "category", d.repoPath.Category().String()). Label(fault.LabelForceNoBackupCreation)) return @@ -198,11 +203,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) { err := cp.toMerge.addRepoRef(d.prevPath.ToBuilder(), d.repoPath, d.locationPath) if err != nil { - cp.errs.AddRecoverable(clues.Wrap(err, "adding item to merge list"). 
+ cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "adding item to merge list"). With( "service", d.repoPath.Service().String(), - "category", d.repoPath.Category().String(), - ). + "category", d.repoPath.Category().String()). Label(fault.LabelForceNoBackupCreation)) } @@ -215,11 +219,10 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) { !d.cached, *d.info) if err != nil { - cp.errs.AddRecoverable(clues.New("adding item to details"). + cp.errs.AddRecoverable(cp.ctx, clues.New("adding item to details"). With( "service", d.repoPath.Service().String(), - "category", d.repoPath.Category().String(), - ). + "category", d.repoPath.Category().String()). Label(fault.LabelForceNoBackupCreation)) return @@ -278,7 +281,7 @@ func (cp *corsoProgress) Error(relpath string, err error, isIgnored bool) { defer cp.UploadProgress.Error(relpath, err, isIgnored) - cp.errs.AddRecoverable(clues.Wrap(err, "kopia reported error"). + cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "kopia reported error"). With("is_ignored", isIgnored, "relative_path", relpath). 
Label(fault.LabelForceNoBackupCreation)) } @@ -350,7 +353,7 @@ func collectionEntries( itemPath, err := streamedEnts.FullPath().AppendItem(e.UUID()) if err != nil { err = clues.Wrap(err, "getting full item path") - progress.errs.AddRecoverable(err) + progress.errs.AddRecoverable(ctx, err) logger.CtxErr(ctx, err).Error("getting full item path") diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index fba178fdf..0ac10ec6b 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -472,8 +472,12 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() { suite.Run(test.name, func() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + bd := &details.Builder{} cp := corsoProgress{ + ctx: ctx, UploadProgress: &snapshotfs.NullUploadProgress{}, deets: bd, pending: map[string]*itemDetails{}, @@ -526,6 +530,10 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() { func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() { t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + bd := &details.Builder{} cachedItems := map[string]testInfo{ suite.targetFileName: { @@ -535,6 +543,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() { }, } cp := corsoProgress{ + ctx: ctx, UploadProgress: &snapshotfs.NullUploadProgress{}, deets: bd, pending: map[string]*itemDetails{}, @@ -565,6 +574,9 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + prevPath := makePath( suite.T(), []string{testTenant, service, testUser, category, testInboxDir, testFileName2}, @@ -582,6 +594,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch // Setup stuff. 
db := &details.Builder{} cp := corsoProgress{ + ctx: ctx, UploadProgress: &snapshotfs.NullUploadProgress{}, deets: db, pending: map[string]*itemDetails{}, @@ -617,8 +630,12 @@ func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() { suite.Run(test.name, func() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + bd := &details.Builder{} cp := corsoProgress{ + ctx: ctx, UploadProgress: &snapshotfs.NullUploadProgress{}, deets: bd, pending: map[string]*itemDetails{}, @@ -682,6 +699,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() { } progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -801,6 +819,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory() defer flush() progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -908,6 +927,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() { defer flush() progress := &corsoProgress{ + ctx: ctx, toMerge: newMergeDetails(), errs: fault.New(true), } @@ -1004,6 +1024,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() { defer flush() progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -1298,6 +1319,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { defer flush() progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -2221,6 +2243,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto defer flush() progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -2375,6 +2398,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre ) progress := 
&corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -2477,6 +2501,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase() ) progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -2733,6 +2758,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt ) progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), @@ -2901,6 +2927,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt ) progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, toMerge: newMergeDetails(), errs: fault.New(true), diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index e424a47b6..4ed8e3a6a 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -160,6 +160,7 @@ func (w Wrapper) ConsumeBackupCollections( } progress := &corsoProgress{ + ctx: ctx, pending: map[string]*itemDetails{}, deets: &details.Builder{}, toMerge: newMergeDetails(), @@ -415,7 +416,7 @@ func loadDirsAndItems( dir, err := getDir(ictx, dirItems.dir, snapshotRoot) if err != nil { - el.AddRecoverable(clues.Wrap(err, "loading storage directory"). + el.AddRecoverable(ictx, clues.Wrap(err, "loading storage directory"). WithClues(ictx). Label(fault.LabelForceNoBackupCreation)) @@ -431,7 +432,7 @@ func loadDirsAndItems( } if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil { - el.AddRecoverable(clues.Wrap(err, "adding collection to merge collection"). + el.AddRecoverable(ctx, clues.Wrap(err, "adding collection to merge collection"). WithClues(ctx). 
Label(fault.LabelForceNoBackupCreation)) @@ -493,7 +494,7 @@ func (w Wrapper) ProduceRestoreCollections( parentStoragePath, err := itemPaths.StoragePath.Dir() if err != nil { - el.AddRecoverable(clues.Wrap(err, "getting storage directory path"). + el.AddRecoverable(ictx, clues.Wrap(err, "getting storage directory path"). WithClues(ictx). Label(fault.LabelForceNoBackupCreation)) diff --git a/src/internal/m365/exchange/backup.go b/src/internal/m365/exchange/backup.go index a24cc1c44..1f2732ea0 100644 --- a/src/internal/m365/exchange/backup.go +++ b/src/internal/m365/exchange/backup.go @@ -224,7 +224,7 @@ func ProduceBackupCollections( su, errs) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -404,7 +404,7 @@ func populateCollections( !ctrlOpts.ToggleFeatures.DisableDelta) if err != nil { if !graph.IsErrDeletedInFlight(err) { - el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) + el.AddRecoverable(ctx, clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) continue } @@ -467,7 +467,7 @@ func populateCollections( ) if collections[id] != nil { - el.AddRecoverable(clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx)) + el.AddRecoverable(ictx, clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx)) continue } diff --git a/src/internal/m365/exchange/backup_test.go b/src/internal/m365/exchange/backup_test.go index 06d5a75c6..0b900945d 100644 --- a/src/internal/m365/exchange/backup_test.go +++ b/src/internal/m365/exchange/backup_test.go @@ -332,7 +332,7 @@ func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Str ic := make(chan data.Stream) defer close(ic) - errs.AddRecoverable(assert.AnError) + errs.AddRecoverable(ctx, assert.AnError) return ic } diff --git a/src/internal/m365/exchange/collection.go b/src/internal/m365/exchange/collection.go index bc5ff3027..9bb320f18 100644 --- a/src/internal/m365/exchange/collection.go +++ 
b/src/internal/m365/exchange/collection.go @@ -230,7 +230,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { atomic.AddInt64(&success, 1) log.With("err", err).Infow("item not found", clues.InErr(err).Slice()...) } else { - errs.AddRecoverable(clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation)) } return @@ -238,7 +238,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { data, err := col.items.Serialize(ctx, item, user, id) if err != nil { - errs.AddRecoverable(clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, clues.Wrap(err, "serializing item").Label(fault.LabelForceNoBackupCreation)) return } diff --git a/src/internal/m365/exchange/container_resolver.go b/src/internal/m365/exchange/container_resolver.go index 368b2ae73..fff528bae 100644 --- a/src/internal/m365/exchange/container_resolver.go +++ b/src/internal/m365/exchange/container_resolver.go @@ -403,7 +403,7 @@ func (cr *containerResolver) populatePaths( _, err := cr.idToPath(ctx, ptr.Val(f.GetId()), 0) if err != nil { err = clues.Wrap(err, "populating path") - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) lastErr = err } } diff --git a/src/internal/m365/exchange/restore.go b/src/internal/m365/exchange/restore.go index 7703933df..f540e621d 100644 --- a/src/internal/m365/exchange/restore.go +++ b/src/internal/m365/exchange/restore.go @@ -64,7 +64,7 @@ func ConsumeRestoreCollections( handler, ok := handlers[category] if !ok { - el.AddRecoverable(clues.New("unsupported restore path category").WithClues(ictx)) + el.AddRecoverable(ictx, clues.New("unsupported restore path category").WithClues(ictx)) continue } @@ -82,7 +82,7 @@ func ConsumeRestoreCollections( isNewCache, errs) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -107,7 +107,7 @@ func 
ConsumeRestoreCollections( break } - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) } } @@ -166,7 +166,7 @@ func restoreCollection( _, err := buf.ReadFrom(itemData.ToReader()) if err != nil { - el.AddRecoverable(clues.Wrap(err, "reading item bytes").WithClues(ictx)) + el.AddRecoverable(ictx, clues.Wrap(err, "reading item bytes").WithClues(ictx)) continue } @@ -174,7 +174,7 @@ func restoreCollection( info, err := ir.restore(ictx, body, userID, destinationID, errs) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -185,7 +185,7 @@ func restoreCollection( // destination folder, then the restore path no longer matches the fullPath. itemPath, err := fullPath.AppendItem(itemData.UUID()) if err != nil { - el.AddRecoverable(clues.Wrap(err, "adding item to collection path").WithClues(ctx)) + el.AddRecoverable(ctx, clues.Wrap(err, "adding item to collection path").WithClues(ctx)) continue } @@ -343,7 +343,7 @@ func uploadAttachments( continue } - el.AddRecoverable(clues.Wrap(err, "uploading mail attachment").WithClues(ctx)) + el.AddRecoverable(ctx, clues.Wrap(err, "uploading mail attachment").WithClues(ctx)) } } diff --git a/src/internal/m365/graph/collections.go b/src/internal/m365/graph/collections.go index 24a8138f3..57c206490 100644 --- a/src/internal/m365/graph/collections.go +++ b/src/internal/m365/graph/collections.go @@ -83,7 +83,7 @@ func BaseCollections( if err != nil { // Shouldn't happen. 
err = clues.Wrap(err, "making path").WithClues(ictx) - el.AddRecoverable(err) + el.AddRecoverable(ictx, err) lastErr = err continue diff --git a/src/internal/m365/onedrive/backup.go b/src/internal/m365/onedrive/backup.go index 06aab38ae..f1a47e9e1 100644 --- a/src/internal/m365/onedrive/backup.go +++ b/src/internal/m365/onedrive/backup.go @@ -77,7 +77,7 @@ func ProduceBackupCollections( odcs, canUsePreviousBackup, err = nc.Get(ctx, metadata, ssmb, errs) if err != nil { - el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) + el.AddRecoverable(ctx, clues.Stack(err).Label(fault.LabelForceNoBackupCreation)) } categories[scope.Category().PathType()] = struct{}{} diff --git a/src/internal/m365/onedrive/collection.go b/src/internal/m365/onedrive/collection.go index 3e9fe5157..afeb0bcb0 100644 --- a/src/internal/m365/onedrive/collection.go +++ b/src/internal/m365/onedrive/collection.go @@ -271,14 +271,14 @@ func (oc *Collection) getDriveItemContent( if err != nil { if clues.HasLabel(err, graph.LabelsMalware) || (item != nil && item.GetMalware() != nil) { logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipMalware).Info("item flagged as malware") - el.AddSkip(fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, graph.ItemInfo(item))) + el.AddSkip(ctx, fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, graph.ItemInfo(item))) return nil, clues.Wrap(err, "malware item").Label(graph.LabelsSkippable) } if clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) || graph.IsErrDeletedInFlight(err) { logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipNotFound).Info("item not found") - el.AddSkip(fault.FileSkip(fault.SkipNotFound, driveID, itemID, itemName, graph.ItemInfo(item))) + el.AddSkip(ctx, fault.FileSkip(fault.SkipNotFound, driveID, itemID, itemName, graph.ItemInfo(item))) return nil, clues.Wrap(err, "deleted item").Label(graph.LabelsSkippable) } @@ -293,13 +293,13 @@ func (oc *Collection) getDriveItemContent( // 
restore, or we have to handle it separately by somehow // deleting the entire collection. logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipBigOneNote).Info("max OneNote file size exceeded") - el.AddSkip(fault.FileSkip(fault.SkipBigOneNote, driveID, itemID, itemName, graph.ItemInfo(item))) + el.AddSkip(ctx, fault.FileSkip(fault.SkipBigOneNote, driveID, itemID, itemName, graph.ItemInfo(item))) return nil, clues.Wrap(err, "max oneNote item").Label(graph.LabelsSkippable) } logger.CtxErr(ctx, err).Error("downloading item") - el.AddRecoverable(clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) + el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) // return err, not el.Err(), because the lazy reader needs to communicate to // the data consumer that this item is unreadable, regardless of the fault state. @@ -431,7 +431,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) { // Fetch metadata for the file itemMeta, itemMetaSize, err = downloadItemMeta(ctx, oc.handler, oc.driveID, item) if err != nil { - el.AddRecoverable(clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation)) + el.AddRecoverable(ctx, clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation)) return } diff --git a/src/internal/m365/onedrive/collections.go b/src/internal/m365/onedrive/collections.go index d05b06820..24371b22c 100644 --- a/src/internal/m365/onedrive/collections.go +++ b/src/internal/m365/onedrive/collections.go @@ -663,7 +663,7 @@ func (c *Collections) UpdateCollections( skip = fault.ContainerSkip(fault.SkipMalware, driveID, itemID, itemName, addtl) } - errs.AddSkip(skip) + errs.AddSkip(ctx, skip) logger.Ctx(ctx).Infow("malware detected", "item_details", addtl) continue @@ -689,7 +689,7 @@ func (c *Collections) UpdateCollections( collectionPath, err := c.getCollectionPath(driveID, item) if err != nil { - el.AddRecoverable(clues.Stack(err). 
+ el.AddRecoverable(ictx, clues.Stack(err). WithClues(ictx). Label(fault.LabelForceNoBackupCreation)) @@ -711,7 +711,7 @@ func (c *Collections) UpdateCollections( if ok { prevPath, err = path.FromDataLayerPath(prevPathStr, false) if err != nil { - el.AddRecoverable(clues.Wrap(err, "invalid previous path"). + el.AddRecoverable(ictx, clues.Wrap(err, "invalid previous path"). WithClues(ictx). With("path_string", prevPathStr)) } diff --git a/src/internal/m365/onedrive/collections_test.go b/src/internal/m365/onedrive/collections_test.go index 2888bc149..5a5805179 100644 --- a/src/internal/m365/onedrive/collections_test.go +++ b/src/internal/m365/onedrive/collections_test.go @@ -1157,7 +1157,7 @@ func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Str ic := make(chan data.Stream) defer close(ic) - errs.AddRecoverable(assert.AnError) + errs.AddRecoverable(ctx, assert.AnError) return ic } diff --git a/src/internal/m365/onedrive/drive.go b/src/internal/m365/onedrive/drive.go index 5a4fadf68..c1df2c539 100644 --- a/src/internal/m365/onedrive/drive.go +++ b/src/internal/m365/onedrive/drive.go @@ -238,7 +238,7 @@ func GetAllFolders( "", errs) if err != nil { - el.AddRecoverable(clues.Wrap(err, "enumerating items in drive")) + el.AddRecoverable(ctx, clues.Wrap(err, "enumerating items in drive")) } } diff --git a/src/internal/m365/onedrive/restore.go b/src/internal/m365/onedrive/restore.go index cdd6a9844..c1416a0a3 100644 --- a/src/internal/m365/onedrive/restore.go +++ b/src/internal/m365/onedrive/restore.go @@ -113,7 +113,7 @@ func RestoreCollections( opts.RestorePermissions, errs) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) } restoreMetrics = support.CombineMetrics(restoreMetrics, metrics) @@ -273,7 +273,7 @@ func ProduceRestoreCollection( itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) if err != nil { - el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ictx)) + el.AddRecoverable(ictx, 
clues.Wrap(err, "appending item to full path").WithClues(ictx)) return } @@ -297,7 +297,7 @@ func ProduceRestoreCollection( } if err != nil { - el.AddRecoverable(clues.Wrap(err, "restoring item")) + el.AddRecoverable(ctx, clues.Wrap(err, "restoring item")) return } diff --git a/src/internal/m365/sharepoint/api/pages.go b/src/internal/m365/sharepoint/api/pages.go index 0434ff0a5..581d5de10 100644 --- a/src/internal/m365/sharepoint/api/pages.go +++ b/src/internal/m365/sharepoint/api/pages.go @@ -70,7 +70,7 @@ func GetSitePages( page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts) if err != nil { - el.AddRecoverable(graph.Wrap(ctx, err, "fetching page")) + el.AddRecoverable(ctx, graph.Wrap(ctx, err, "fetching page")) return } diff --git a/src/internal/m365/sharepoint/backup.go b/src/internal/m365/sharepoint/backup.go index ba5c45c23..79765bfdb 100644 --- a/src/internal/m365/sharepoint/backup.go +++ b/src/internal/m365/sharepoint/backup.go @@ -80,7 +80,7 @@ func ProduceBackupCollections( ctrlOpts, errs) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -101,7 +101,7 @@ func ProduceBackupCollections( ctrlOpts, errs) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -115,7 +115,7 @@ func ProduceBackupCollections( ctrlOpts, errs) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -184,7 +184,7 @@ func collectLists( false, tuple.name) if err != nil { - el.AddRecoverable(clues.Wrap(err, "creating list collection path").WithClues(ctx)) + el.AddRecoverable(ctx, clues.Wrap(err, "creating list collection path").WithClues(ctx)) } collection := NewCollection( @@ -284,7 +284,7 @@ func collectPages( false, tuple.Name) if err != nil { - el.AddRecoverable(clues.Wrap(err, "creating page collection path").WithClues(ctx)) + el.AddRecoverable(ctx, clues.Wrap(err, "creating page collection path").WithClues(ctx)) } collection := NewCollection( diff --git 
a/src/internal/m365/sharepoint/collection.go b/src/internal/m365/sharepoint/collection.go index 12db281a4..90af58cbf 100644 --- a/src/internal/m365/sharepoint/collection.go +++ b/src/internal/m365/sharepoint/collection.go @@ -239,7 +239,7 @@ func (sc *Collection) retrieveLists( byteArray, err := serializeContent(ctx, wtr, lst) if err != nil { - el.AddRecoverable(clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) + el.AddRecoverable(ctx, clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) continue } @@ -308,7 +308,7 @@ func (sc *Collection) retrievePages( byteArray, err := serializeContent(ctx, wtr, pg) if err != nil { - el.AddRecoverable(clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) + el.AddRecoverable(ctx, clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) continue } diff --git a/src/internal/m365/sharepoint/list.go b/src/internal/m365/sharepoint/list.go index 3532e029b..3dcaaa58f 100644 --- a/src/internal/m365/sharepoint/list.go +++ b/src/internal/m365/sharepoint/list.go @@ -130,13 +130,13 @@ func loadSiteLists( entry, err = gs.Client().Sites().BySiteId(siteID).Lists().ByListId(id).Get(ctx, nil) if err != nil { - el.AddRecoverable(graph.Wrap(ctx, err, "getting site list")) + el.AddRecoverable(ctx, graph.Wrap(ctx, err, "getting site list")) return } cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id, errs) if err != nil { - el.AddRecoverable(clues.Wrap(err, "getting list contents")) + el.AddRecoverable(ctx, clues.Wrap(err, "getting list contents")) return } @@ -220,7 +220,7 @@ func fetchListItems( fields, err := newPrefix.Fields().Get(ctx, nil) if err != nil { - el.AddRecoverable(graph.Wrap(ctx, err, "getting list fields")) + el.AddRecoverable(ctx, graph.Wrap(ctx, err, "getting list fields")) continue } @@ -336,7 +336,7 @@ func fetchContentTypes( links, err := fetchColumnLinks(ctx, 
gs, siteID, listID, id) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -344,7 +344,7 @@ func fetchContentTypes( cs, err := fetchColumns(ctx, gs, siteID, listID, id) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } diff --git a/src/internal/m365/sharepoint/restore.go b/src/internal/m365/sharepoint/restore.go index 5bf53a4d6..504f9a06a 100644 --- a/src/internal/m365/sharepoint/restore.go +++ b/src/internal/m365/sharepoint/restore.go @@ -101,7 +101,7 @@ func ConsumeRestoreCollections( restoreMetrics = support.CombineMetrics(restoreMetrics, metrics) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) } if errors.Is(err, context.Canceled) { @@ -238,7 +238,7 @@ func RestoreListCollection( siteID, restoreContainerName) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -246,7 +246,7 @@ func RestoreListCollection( itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) if err != nil { - el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) + el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx)) continue } @@ -318,7 +318,7 @@ func RestorePageCollection( siteID, restoreContainerName) if err != nil { - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) continue } @@ -326,7 +326,7 @@ func RestorePageCollection( itemPath, err := dc.FullPath().AppendItem(itemData.UUID()) if err != nil { - el.AddRecoverable(clues.Wrap(err, "appending item to full path").WithClues(ctx)) + el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx)) continue } diff --git a/src/internal/operations/helpers_test.go b/src/internal/operations/helpers_test.go index 6afe7329b..339b1703e 100644 --- a/src/internal/operations/helpers_test.go +++ b/src/internal/operations/helpers_test.go @@ -1,6 +1,7 @@ package operations import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -22,13 
+23,13 @@ func TestHelpersUnitSuite(t *testing.T) { func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() { table := []struct { name string - errs func() *fault.Bus + errs func(context.Context) *fault.Bus opts control.Options expectErr assert.ErrorAssertionFunc }{ { name: "no errors", - errs: func() *fault.Bus { + errs: func(ctx context.Context) *fault.Bus { return fault.New(false) }, opts: control.Options{ @@ -38,7 +39,7 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() { }, { name: "already failed", - errs: func() *fault.Bus { + errs: func(ctx context.Context) *fault.Bus { fn := fault.New(false) fn.Fail(assert.AnError) return fn @@ -50,9 +51,9 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() { }, { name: "best effort", - errs: func() *fault.Bus { + errs: func(ctx context.Context) *fault.Bus { fn := fault.New(false) - fn.AddRecoverable(assert.AnError) + fn.AddRecoverable(ctx, assert.AnError) return fn }, opts: control.Options{ @@ -62,9 +63,9 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() { }, { name: "recoverable errors produce hard fail", - errs: func() *fault.Bus { + errs: func(ctx context.Context) *fault.Bus { fn := fault.New(false) - fn.AddRecoverable(assert.AnError) + fn.AddRecoverable(ctx, assert.AnError) return fn }, opts: control.Options{ @@ -74,11 +75,11 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() { }, { name: "multiple recoverable errors produce hard fail", - errs: func() *fault.Bus { + errs: func(ctx context.Context) *fault.Bus { fn := fault.New(false) - fn.AddRecoverable(assert.AnError) - fn.AddRecoverable(assert.AnError) - fn.AddRecoverable(assert.AnError) + fn.AddRecoverable(ctx, assert.AnError) + fn.AddRecoverable(ctx, assert.AnError) + fn.AddRecoverable(ctx, assert.AnError) return fn }, opts: control.Options{ @@ -94,7 +95,7 @@ func (suite *HelpersUnitSuite) TestFinalizeErrorHandling() { ctx, flush := tester.NewContext(t) defer flush() - errs := test.errs() + errs := test.errs(ctx) 
finalizeErrorHandling(ctx, test.opts, errs, "test") test.expectErr(t, errs.Failure()) diff --git a/src/internal/operations/pathtransformer/restore_path_transformer.go b/src/internal/operations/pathtransformer/restore_path_transformer.go index 8993328f3..0225d81ea 100644 --- a/src/internal/operations/pathtransformer/restore_path_transformer.go +++ b/src/internal/operations/pathtransformer/restore_path_transformer.go @@ -168,7 +168,7 @@ func GetPaths( restorePaths, err := makeRestorePathsForEntry(ctx, backupVersion, ent) if err != nil { - el.AddRecoverable(clues.Wrap(err, "getting restore paths")) + el.AddRecoverable(ctx, clues.Wrap(err, "getting restore paths")) continue } diff --git a/src/internal/streamstore/collectables_test.go b/src/internal/streamstore/collectables_test.go index 6b8b9d395..d9c827408 100644 --- a/src/internal/streamstore/collectables_test.go +++ b/src/internal/streamstore/collectables_test.go @@ -1,6 +1,7 @@ package streamstore import ( + "context" "testing" "github.com/alcionai/clues" @@ -73,13 +74,13 @@ func (suite *StreamStoreIntgSuite) TestStreamer() { table := []struct { name string deets func(*testing.T) *details.Details - errs func() *fault.Errors + errs func(context.Context) *fault.Errors hasSnapID assert.ValueAssertionFunc }{ { name: "none", deets: func(*testing.T) *details.Details { return nil }, - errs: func() *fault.Errors { return nil }, + errs: func(context.Context) *fault.Errors { return nil }, hasSnapID: assert.Empty, }, { @@ -98,18 +99,20 @@ func (suite *StreamStoreIntgSuite) TestStreamer() { })) return deetsBuilder.Details() }, - errs: func() *fault.Errors { return nil }, + errs: func(context.Context) *fault.Errors { return nil }, hasSnapID: assert.NotEmpty, }, { name: "errors", deets: func(*testing.T) *details.Details { return nil }, - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { bus := fault.New(false) bus.Fail(clues.New("foo")) - bus.AddRecoverable(clues.New("bar")) - 
bus.AddRecoverable(fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) - bus.AddSkip(fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddRecoverable(ctx, clues.New("bar")) + bus.AddRecoverable( + ctx, + fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddSkip(ctx, fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) fe := bus.Errors() return fe @@ -133,12 +136,14 @@ func (suite *StreamStoreIntgSuite) TestStreamer() { return deetsBuilder.Details() }, - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { bus := fault.New(false) bus.Fail(clues.New("foo")) - bus.AddRecoverable(clues.New("bar")) - bus.AddRecoverable(fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) - bus.AddSkip(fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddRecoverable(ctx, clues.New("bar")) + bus.AddRecoverable( + ctx, + fault.FileErr(clues.New("file"), "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) + bus.AddSkip(ctx, fault.FileSkip(fault.SkipMalware, "ns", "file-id", "file-name", map[string]any{"foo": "bar"})) fe := bus.Errors() return fe @@ -164,7 +169,7 @@ func (suite *StreamStoreIntgSuite) TestStreamer() { require.NoError(t, err) } - errs := test.errs() + errs := test.errs(ctx) if errs != nil { err = ss.Collect(ctx, FaultErrorsCollector(errs)) require.NoError(t, err) diff --git a/src/pkg/fault/example_fault_test.go b/src/pkg/fault/example_fault_test.go index c830a9aa9..55a30295c 100644 --- a/src/pkg/fault/example_fault_test.go +++ b/src/pkg/fault/example_fault_test.go @@ -1,6 +1,7 @@ package fault_test import ( + "context" "fmt" "github.com/alcionai/clues" @@ -15,6 +16,8 @@ import ( var ( ctrl any items = []string{} + //nolint:forbidigo + ctx = context.Background() ) type 
mockController struct { @@ -133,7 +136,7 @@ func ExampleBus_AddRecoverable() { // to aggregate the error using fault. // Side note: technically, you should use a local bus // here (see below) instead of errs. - errs.AddRecoverable(err) + errs.AddRecoverable(ctx, err) } } @@ -150,7 +153,7 @@ func ExampleBus_AddRecoverable() { } if err := getIthItem(i); err != nil { - errs.AddRecoverable(err) + errs.AddRecoverable(ctx, err) } } } @@ -175,13 +178,13 @@ func ExampleBus_Failure() { // If Failure() is nil, then you can assume the operation completed. // A complete operation is not necessarily an error-free operation. - // Recoverable errors may still have been added using AddRecoverable(err). + // Recoverable errors may still have been added using AddRecoverable(ctx, err). // Make sure you check both. // If failFast is set to true, then the first recoerable error Added gets // promoted to the Err() position. errs = fault.New(true) - errs.AddRecoverable(clues.New("not catastrophic, but still becomes the Failure()")) + errs.AddRecoverable(ctx, clues.New("not catastrophic, but still becomes the Failure()")) err = errs.Failure() fmt.Println(err) @@ -194,8 +197,8 @@ func ExampleBus_Failure() { // recover from and continue. func ExampleErrors_Recovered() { errs := fault.New(false) - errs.AddRecoverable(clues.New("not catastrophic")) - errs.AddRecoverable(clues.New("something unwanted")) + errs.AddRecoverable(ctx, clues.New("not catastrophic")) + errs.AddRecoverable(ctx, clues.New("something unwanted")) // Recovered() gets the slice of all recoverable errors added during // the run, but which did not cause a failure. @@ -247,12 +250,12 @@ func ExampleBus_Local() { } if err := getIthItem(i); err != nil { - // instead of calling errs.AddRecoverable(err), we call the + // instead of calling errs.AddRecoverable(ctx, err), we call the // local bus's Add method. The error will still get // added to the errs.Recovered() set. 
But if this err // causes the run to fail, only this local bus treats // it as the causal failure. - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) } } @@ -330,7 +333,7 @@ func Example_e2e() { if err := storer(d); err != nil { // Since we're at the top of the iteration, we need // to add each error to the fault.localBus struct. - el.AddRecoverable(err) + el.AddRecoverable(ctx, err) } } @@ -383,7 +386,7 @@ func ExampleErrors_Failure_return() { } if err := dependency.do(); err != nil { - errs.AddRecoverable(clues.Wrap(err, "recoverable")) + errs.AddRecoverable(ctx, clues.Wrap(err, "recoverable")) } } @@ -426,7 +429,7 @@ func ExampleBus_AddSkip() { // over, instead of error out. An initial case is when Graph API identifies // a file as containing malware. We can't download the file: it'll always // error. Our only option is to skip it. - errs.AddSkip(fault.FileSkip( + errs.AddSkip(ctx, fault.FileSkip( fault.SkipMalware, "deduplication-namespace", "file-id", diff --git a/src/pkg/fault/fault.go b/src/pkg/fault/fault.go index 041e3d4e2..d69569edc 100644 --- a/src/pkg/fault/fault.go +++ b/src/pkg/fault/fault.go @@ -12,6 +12,7 @@ import ( "golang.org/x/exp/slices" "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/pkg/logger" ) type Bus struct { @@ -118,27 +119,45 @@ func (e *Bus) setFailure(err error) *Bus { // // TODO: nil return, not Bus, since we don't want people to return // from errors.AddRecoverable(). -func (e *Bus) AddRecoverable(err error) *Bus { +func (e *Bus) AddRecoverable(ctx context.Context, err error) { if err == nil { - return e + return } e.mu.Lock() defer e.mu.Unlock() - return e.addRecoverableErr(err) + e.logAndAddRecoverable(ctx, err, 1) +} + +// logs the error and adds it to the bus. If the error is a failure, +// it gets logged at an Error level. Otherwise logs an Info. 
+func (e *Bus) logAndAddRecoverable(ctx context.Context, err error, skip int) { + log := logger.CtxErrStack(ctx, err, skip+1) + isFail := e.addRecoverableErr(err) + + if isFail { + log.Error("recoverable error") + } else { + log.Info("recoverable error") + } } // addErr handles adding errors to errors.errs. Sync locking -// gets handled upstream of this call. -func (e *Bus) addRecoverableErr(err error) *Bus { +// gets handled upstream of this call. Returns true if the +// error is a failure, false otherwise. +func (e *Bus) addRecoverableErr(err error) bool { + var isFail bool + if e.failure == nil && e.failFast { e.setFailure(err) + + isFail = true } e.recoverable = append(e.recoverable, err) - return e + return isFail } // AddSkip appends a record of a Skipped item to the fault bus. @@ -151,15 +170,23 @@ func (e *Bus) addRecoverableErr(err error) *Bus { // 2. Skipping avoids a permanent and consistent failure. If // the underlying reason is transient or otherwise recoverable, // the item should not be skipped. -func (e *Bus) AddSkip(s *Skipped) *Bus { +func (e *Bus) AddSkip(ctx context.Context, s *Skipped) { if s == nil { - return e + return } e.mu.Lock() defer e.mu.Unlock() - return e.addSkip(s) + e.logAndAddSkip(ctx, s, 1) +} + +// logs the error and adds a skipped item. +func (e *Bus) logAndAddSkip(ctx context.Context, s *Skipped, skip int) { + logger.CtxStack(ctx, skip+1). + With("skipped", s). + Info("recoverable error") + e.addSkip(s) } func (e *Bus) addSkip(s *Skipped) *Bus { @@ -344,7 +371,7 @@ type localBus struct { current error } -func (e *localBus) AddRecoverable(err error) { +func (e *localBus) AddRecoverable(ctx context.Context, err error) { if err == nil { return } @@ -356,7 +383,7 @@ func (e *localBus) AddRecoverable(err error) { e.current = err } - e.bus.AddRecoverable(err) + e.bus.logAndAddRecoverable(ctx, err, 1) } // AddSkip appends a record of a Skipped item to the local bus. 
@@ -369,7 +396,7 @@ func (e *localBus) AddRecoverable(err error) { // 2. Skipping avoids a permanent and consistent failure. If // the underlying reason is transient or otherwise recoverable, // the item should not be skipped. -func (e *localBus) AddSkip(s *Skipped) { +func (e *localBus) AddSkip(ctx context.Context, s *Skipped) { if s == nil { return } @@ -377,7 +404,7 @@ func (e *localBus) AddSkip(s *Skipped) { e.mu.Lock() defer e.mu.Unlock() - e.bus.AddSkip(s) + e.bus.logAndAddSkip(ctx, s, 1) } // Failure returns the failure that happened within the local bus. diff --git a/src/pkg/fault/fault_test.go b/src/pkg/fault/fault_test.go index 4d731ede1..c4166456b 100644 --- a/src/pkg/fault/fault_test.go +++ b/src/pkg/fault/fault_test.go @@ -1,6 +1,7 @@ package fault_test import ( + "context" "encoding/json" "testing" @@ -75,6 +76,9 @@ func (suite *FaultErrorsUnitSuite) TestErr() { suite.Run(test.name, func() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + n := fault.New(test.failFast) require.NotNil(t, n) require.NoError(t, n.Failure(), clues.ToCore(n.Failure())) @@ -83,8 +87,7 @@ func (suite *FaultErrorsUnitSuite) TestErr() { e := n.Fail(test.fail) require.NotNil(t, e) - e = n.AddRecoverable(test.add) - require.NotNil(t, e) + n.AddRecoverable(ctx, test.add) test.expect(t, n.Failure()) }) @@ -152,14 +155,16 @@ func (suite *FaultErrorsUnitSuite) TestErrs() { suite.Run(test.name, func() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + n := fault.New(test.failFast) require.NotNil(t, n) e := n.Fail(test.fail) require.NotNil(t, e) - e = n.AddRecoverable(test.add) - require.NotNil(t, e) + n.AddRecoverable(ctx, test.add) test.expect(t, n.Recovered()) }) @@ -169,14 +174,17 @@ func (suite *FaultErrorsUnitSuite) TestErrs() { func (suite *FaultErrorsUnitSuite) TestAdd() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + n := fault.New(true) require.NotNil(t, n) - n.AddRecoverable(assert.AnError) + 
n.AddRecoverable(ctx, assert.AnError) assert.Error(t, n.Failure()) assert.Len(t, n.Recovered(), 1) - n.AddRecoverable(assert.AnError) + n.AddRecoverable(ctx, assert.AnError) assert.Error(t, n.Failure()) assert.Len(t, n.Recovered(), 2) } @@ -184,29 +192,35 @@ func (suite *FaultErrorsUnitSuite) TestAdd() { func (suite *FaultErrorsUnitSuite) TestAddSkip() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + n := fault.New(true) require.NotNil(t, n) n.Fail(assert.AnError) assert.Len(t, n.Skipped(), 0) - n.AddRecoverable(assert.AnError) + n.AddRecoverable(ctx, assert.AnError) assert.Len(t, n.Skipped(), 0) - n.AddSkip(fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil)) + n.AddSkip(ctx, fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil)) assert.Len(t, n.Skipped(), 1) } func (suite *FaultErrorsUnitSuite) TestErrors() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + // not fail-fast n := fault.New(false) require.NotNil(t, n) n.Fail(clues.New("fail")) - n.AddRecoverable(clues.New("1")) - n.AddRecoverable(clues.New("2")) + n.AddRecoverable(ctx, clues.New("1")) + n.AddRecoverable(ctx, clues.New("2")) d := n.Errors() assert.Equal(t, clues.ToCore(n.Failure()), d.Failure) @@ -218,8 +232,8 @@ func (suite *FaultErrorsUnitSuite) TestErrors() { require.NotNil(t, n) n.Fail(clues.New("fail")) - n.AddRecoverable(clues.New("1")) - n.AddRecoverable(clues.New("2")) + n.AddRecoverable(ctx, clues.New("1")) + n.AddRecoverable(ctx, clues.New("2")) d = n.Errors() assert.Equal(t, clues.ToCore(n.Failure()), d.Failure) @@ -234,13 +248,13 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { table := []struct { name string - errs func() *fault.Errors + errs func(context.Context) *fault.Errors expectItems []fault.Item expectRecoverable []*clues.ErrCore }{ { name: "no errors", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { return fault.New(false).Errors() }, expectItems: []fault.Item{}, @@ -248,10 
+262,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "no items", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(ae) - b.AddRecoverable(ae) + b.AddRecoverable(ctx, ae) return b.Errors() }, @@ -260,10 +274,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "failure item", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) - b.AddRecoverable(ae) + b.AddRecoverable(ctx, ae) return b.Errors() }, @@ -272,10 +286,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "recoverable item", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(ae) - b.AddRecoverable(fault.OwnerErr(ae, "ns", "id", "name", addtl)) + b.AddRecoverable(ctx, fault.OwnerErr(ae, "ns", "id", "name", addtl)) return b.Errors() }, @@ -284,10 +298,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "two items", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(fault.OwnerErr(ae, "ns", "oid", "name", addtl)) - b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) + b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name", addtl)) return b.Errors() }, @@ -299,10 +313,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "two items - diff namespace same id", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) - b.AddRecoverable(fault.FileErr(ae, "ns2", "id", "name", addtl)) + b.AddRecoverable(ctx, fault.FileErr(ae, "ns2", "id", "name", addtl)) return b.Errors() }, @@ -314,10 +328,10 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "duplicate items - failure priority", - errs: func() 
*fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(fault.OwnerErr(ae, "ns", "id", "name", addtl)) - b.AddRecoverable(fault.FileErr(ae, "ns", "id", "name", addtl)) + b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "id", "name", addtl)) return b.Errors() }, @@ -328,11 +342,11 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "duplicate items - last recoverable priority", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(ae) - b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) - b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name2", addtl)) + b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name", addtl)) + b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name2", addtl)) return b.Errors() }, @@ -343,11 +357,11 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { }, { name: "recoverable item and non-items", - errs: func() *fault.Errors { + errs: func(ctx context.Context) *fault.Errors { b := fault.New(false) b.Fail(ae) - b.AddRecoverable(fault.FileErr(ae, "ns", "fid", "name", addtl)) - b.AddRecoverable(ae) + b.AddRecoverable(ctx, fault.FileErr(ae, "ns", "fid", "name", addtl)) + b.AddRecoverable(ctx, ae) return b.Errors() }, @@ -360,7 +374,11 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() - fe := test.errs() + + ctx, flush := tester.NewContext(t) + defer flush() + + fe := test.errs(ctx) assert.ElementsMatch(t, test.expectItems, fe.Items) require.Equal(t, test.expectRecoverable, fe.Recovered) @@ -378,12 +396,15 @@ func (suite *FaultErrorsUnitSuite) TestErrors_Items() { func (suite *FaultErrorsUnitSuite) TestMarshalUnmarshal() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + // not fail-fast n := fault.New(false) require.NotNil(t, n) - n.AddRecoverable(clues.New("1")) - n.AddRecoverable(clues.New("2")) + 
n.AddRecoverable(ctx, clues.New("1")) + n.AddRecoverable(ctx, clues.New("2")) bs, err := json.Marshal(n.Errors()) require.NoError(t, err, clues.ToCore(err)) @@ -419,13 +440,16 @@ func (suite *FaultErrorsUnitSuite) TestUnmarshalLegacy() { func (suite *FaultErrorsUnitSuite) TestTracker() { t := suite.T() + ctx, flush := tester.NewContext(t) + defer flush() + eb := fault.New(false) lb := eb.Local() assert.NoError(t, lb.Failure(), clues.ToCore(lb.Failure())) assert.Empty(t, eb.Recovered()) - lb.AddRecoverable(assert.AnError) + lb.AddRecoverable(ctx, assert.AnError) assert.NoError(t, lb.Failure(), clues.ToCore(lb.Failure())) assert.NoError(t, eb.Failure(), clues.ToCore(eb.Failure())) assert.NotEmpty(t, eb.Recovered()) @@ -436,7 +460,7 @@ func (suite *FaultErrorsUnitSuite) TestTracker() { assert.NoError(t, lbt.Failure(), clues.ToCore(lbt.Failure())) assert.Empty(t, ebt.Recovered()) - lbt.AddRecoverable(assert.AnError) + lbt.AddRecoverable(ctx, assert.AnError) assert.Error(t, lbt.Failure()) assert.Error(t, ebt.Failure()) assert.NotEmpty(t, ebt.Recovered()) diff --git a/src/pkg/logger/logger.go b/src/pkg/logger/logger.go index 48c23e5af..fb4d37e4b 100644 --- a/src/pkg/logger/logger.go +++ b/src/pkg/logger/logger.go @@ -437,6 +437,14 @@ func Ctx(ctx context.Context) *zap.SugaredLogger { return l.(*zap.SugaredLogger).With(clues.In(ctx).Slice()...) } +// CtxStack retrieves the logger embedded in the context, and adds the +// stacktrace to the log info. +// If skip is non-zero, it skips the stack calls starting from the +// first. Skip always adds +1 to account for this wrapper. +func CtxStack(ctx context.Context, skip int) *zap.SugaredLogger { + return Ctx(ctx).With(zap.StackSkip("trace", skip+1)) +} + // CtxErr retrieves the logger embedded in the context // and packs all of the structured data in the error inside it. 
func CtxErr(ctx context.Context, err error) *zap.SugaredLogger { @@ -447,6 +455,19 @@ func CtxErr(ctx context.Context, err error) *zap.SugaredLogger { With(clues.InErr(err).Slice()...) } +// CtxErrStack retrieves the logger embedded in the context +// and packs all of the structured data in the error inside it. +// If skip is non-zero, it skips the stack calls starting from the +// first. Skip always adds +1 to account for this wrapper. +func CtxErrStack(ctx context.Context, err error, skip int) *zap.SugaredLogger { + return Ctx(ctx). + With( + "error", err, + "error_labels", clues.Labels(err)). + With(zap.StackSkip("trace", skip+1)). + With(clues.InErr(err).Slice()...) +} + // Flush writes out all buffered logs. func Flush(ctx context.Context) { _ = Ctx(ctx).Sync() diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index b58cf3eac..c8e30829a 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -402,7 +402,7 @@ func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup b, err := sw.GetBackup(ictx, model.StableID(id)) if err != nil { - errs.AddRecoverable(errWrapper(err)) + errs.AddRecoverable(ctx, errWrapper(err)) } bups = append(bups, b) diff --git a/src/pkg/selectors/scopes.go b/src/pkg/selectors/scopes.go index f0c5fb4da..2a860892b 100644 --- a/src/pkg/selectors/scopes.go +++ b/src/pkg/selectors/scopes.go @@ -368,7 +368,7 @@ func reduce[T scopeT, C categoryT]( repoPath, err := path.FromDataLayerPath(ent.RepoRef, true) if err != nil { - el.AddRecoverable(clues.Wrap(err, "transforming repoRef to path").WithClues(ictx)) + el.AddRecoverable(ctx, clues.Wrap(err, "transforming repoRef to path").WithClues(ictx)) continue } @@ -391,7 +391,7 @@ func reduce[T scopeT, C categoryT]( pv, err := dc.pathValues(repoPath, *ent, s.Cfg) if err != nil { - el.AddRecoverable(clues.Wrap(err, "getting path values").WithClues(ictx)) + el.AddRecoverable(ctx, clues.Wrap(err, "getting path 
values").WithClues(ictx)) continue } diff --git a/src/pkg/services/m365/api/contacts.go b/src/pkg/services/m365/api/contacts.go index fa52d8312..9d2c253d5 100644 --- a/src/pkg/services/m365/api/contacts.go +++ b/src/pkg/services/m365/api/contacts.go @@ -183,7 +183,7 @@ func (c Contacts) EnumerateContainers( } if err := graph.CheckIDNameAndParentFolderID(fold); err != nil { - errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) continue } @@ -194,7 +194,7 @@ func (c Contacts) EnumerateContainers( temp := graph.NewCacheFolder(fold, nil, nil) if err := fn(&temp); err != nil { - errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) continue } } diff --git a/src/pkg/services/m365/api/events.go b/src/pkg/services/m365/api/events.go index 8f35481e2..37f40107b 100644 --- a/src/pkg/services/m365/api/events.go +++ b/src/pkg/services/m365/api/events.go @@ -236,7 +236,7 @@ func (c Events) EnumerateContainers( cd := CalendarDisplayable{Calendarable: cal} if err := graph.CheckIDAndName(cd); err != nil { - errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) continue } @@ -250,7 +250,7 @@ func (c Events) EnumerateContainers( path.Builder{}.Append(ptr.Val(cd.GetId())), // storage path path.Builder{}.Append(ptr.Val(cd.GetDisplayName()))) // display location if err := fn(&temp); err != nil { - errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) continue } } diff --git a/src/pkg/services/m365/api/mail.go b/src/pkg/services/m365/api/mail.go index 8c7ca138c..441198c2a 100644 --- 
a/src/pkg/services/m365/api/mail.go +++ b/src/pkg/services/m365/api/mail.go @@ -264,7 +264,7 @@ func (c Mail) EnumerateContainers( } if err := graph.CheckIDNameAndParentFolderID(fold); err != nil { - errs.AddRecoverable(graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, graph.Stack(ctx, err).Label(fault.LabelForceNoBackupCreation)) continue } @@ -275,7 +275,7 @@ func (c Mail) EnumerateContainers( temp := graph.NewCacheFolder(fold, nil, nil) if err := fn(&temp); err != nil { - errs.AddRecoverable(graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) + errs.AddRecoverable(ctx, graph.Stack(fctx, err).Label(fault.LabelForceNoBackupCreation)) continue } } diff --git a/src/pkg/services/m365/api/sites.go b/src/pkg/services/m365/api/sites.go index d1f506cda..a73703703 100644 --- a/src/pkg/services/m365/api/sites.go +++ b/src/pkg/services/m365/api/sites.go @@ -72,7 +72,7 @@ func (c Sites) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Siteable, } if err != nil { - el.AddRecoverable(graph.Wrap(ctx, err, "validating site")) + el.AddRecoverable(ctx, graph.Wrap(ctx, err, "validating site")) return true } diff --git a/src/pkg/services/m365/api/users.go b/src/pkg/services/m365/api/users.go index 908128579..07d2430ac 100644 --- a/src/pkg/services/m365/api/users.go +++ b/src/pkg/services/m365/api/users.go @@ -97,7 +97,7 @@ func (c Users) GetAll( err := validateUser(item) if err != nil { - el.AddRecoverable(graph.Wrap(ctx, err, "validating user")) + el.AddRecoverable(ctx, graph.Wrap(ctx, err, "validating user")) } else { us = append(us, item) }