use clues New|Wrap|StackWC() (#4684)

Replaces all cases of `New|Wrap|Stack(...).WithClues(ctx)` with the flattened `NewWC|WrapWC|StackWC(ctx, ...)` functions introduced in the latest clues bump.

Other changes:
* Remove redundant `WithClues` builders where the error producer already attached the ctx.
* Correct some usages of `ictx` within loops (see the loop sketch after the genericCreateCommand hunks below).

No logic changes, just cleanup.
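
For reference, a minimal before/after sketch of the pattern this PR flattens (hypothetical call site; the clues import path is assumed from this repo's usage):

```go
package example

import (
	"context"

	"github.com/alcionai/clues"
)

func connect(ctx context.Context, dial func(context.Context) error) error {
	if err := dial(ctx); err != nil {
		// Before: build the error, then bolt ctx values on with a builder:
		//   return clues.Wrap(err, "connecting to account").WithClues(ctx)
		// After: the *WC variants take ctx as the first argument.
		return clues.WrapWC(ctx, err, "connecting to account")
	}

	// New and Stack follow the same shape:
	//   clues.New("msg").WithClues(ctx)   ->  clues.NewWC(ctx, "msg")
	//   clues.Stack(err).WithClues(ctx)   ->  clues.StackWC(ctx, err)
	return nil
}
```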

---

#### Does this PR need a docs update or release note?

- [x]  No

#### Type of change

- [x] 🧹 Tech Debt/Cleanup

#### Test Plan

- [x]  Unit test
- [x] 💚 E2E
Keepers committed 2023-11-15 16:01:08 -07:00 (committed by GitHub)
commit 4c72e9eab7, parent ea2bf19bd1
79 changed files with 415 additions and 479 deletions

View File

@@ -53,6 +53,9 @@ linters-settings:
     # Prefer suite.Run(name, func() {}) for subtests as testify has it instead
     # of suite.T().Run(name, func(t *testing.T) {}).
     - '(T\(\)|\st[a-zA-Z0-9]*)\.Run(# prefer testify suite.Run(name, func()) )?'
+    # Prefer packing ctx values into the error using NewWC, WrapWC, or StackWC
+    # instead of New|Stack|Wrap().WithClues(ctx)
+    - 'WithClues(# prefer the builderWC variant - ex: StackWC(ctx, ...))?'
   lll:
     line-length: 120
   revive:
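
The new linter entry above should catch any builder-style stragglers. A hedged sketch of what it flags versus what it allows (hypothetical error site; the list appears to be a forbidigo-style setting, matching the `(# message)?` suffix convention of the existing entries):

```go
package example

import (
	"context"

	"github.com/alcionai/clues"
)

func validate(ctx context.Context, ok bool) error {
	if !ok {
		// The regex would flag this remaining builder call:
		//   return clues.New("invalid input").WithClues(ctx)

		// ...and the lint message points at the flattened variant:
		return clues.NewWC(ctx, "invalid input")
	}

	return nil
}
```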

View File

@@ -187,7 +187,7 @@ func genericCreateCommand(
 		bo, err := r.NewBackupWithLookup(ictx, discSel, ins)
 		if err != nil {
-			errs = append(errs, clues.Wrap(err, owner).WithClues(ictx))
+			errs = append(errs, clues.WrapWC(ictx, err, owner))
 			Errf(ictx, "%v\n", err)
 			continue
@@ -208,7 +208,7 @@ func genericCreateCommand(
 				continue
 			}
-			errs = append(errs, clues.Wrap(err, owner).WithClues(ictx))
+			errs = append(errs, clues.WrapWC(ictx, err, owner))
 			Errf(ictx, "%v\n", err)
 			continue
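
The `ictx` corrections mentioned in the description follow the shape of the hunks above: each loop iteration derives its own iteration context, and error wrapping should reference that `ictx` rather than the outer `ctx`. A minimal sketch (hypothetical loop; `clues.Add` and `WrapWC` as used throughout this diff):

```go
package example

import (
	"context"

	"github.com/alcionai/clues"
)

func backupAll(
	ctx context.Context,
	owners []string,
	run func(context.Context, string) error,
) []error {
	var errs []error

	for _, owner := range owners {
		// Derive a per-iteration context carrying the owner.
		ictx := clues.Add(ctx, "owner", owner)

		if err := run(ictx, owner); err != nil {
			// Wrap with ictx so the owner clue rides along with the error.
			errs = append(errs, clues.WrapWC(ictx, err, owner))
			continue
		}
	}

	return errs
}
```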

View File

@@ -33,14 +33,14 @@ func deleteBackups(
 	r, _, err := utils.GetAccountAndConnectWithOverrides(ctx, service, storage.ProviderS3, nil)
 	if err != nil {
-		return nil, clues.Wrap(err, "connecting to account").WithClues(ctx)
+		return nil, clues.WrapWC(ctx, err, "connecting to account")
 	}
 	defer r.Close(ctx)
 	backups, err := r.BackupsByTag(ctx, store.Service(service))
 	if err != nil {
-		return nil, clues.Wrap(err, "listing backups").WithClues(ctx)
+		return nil, clues.WrapWC(ctx, err, "listing backups")
 	}
 	var (
@@ -51,11 +51,11 @@ func deleteBackups(
 	for _, backup := range backups {
 		if backup.StartAndEndTime.CompletedAt.Before(cutoff) {
 			if err := r.DeleteBackups(ctx, true, backup.ID.String()); err != nil {
-				return nil, clues.Wrap(
+				return nil, clues.WrapWC(
+					ctx,
 					err,
 					"deleting backup").
-					With("backup_id", backup.ID).
-					WithClues(ctx)
+					With("backup_id", backup.ID)
 			}
 			deleted = append(deleted, backup.ID.String())
@@ -122,7 +122,7 @@ func pitrListBackups(
 	backups, err := r.BackupsByTag(ctx, store.Service(pst))
 	if err != nil {
-		return clues.Wrap(err, "listing backups").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "listing backups")
 	}
 	bups := map[string]struct{}{}
@@ -135,9 +135,8 @@ func pitrListBackups(
 	for _, backupID := range backupIDs {
 		if _, ok := bups[backupID]; !ok {
-			return clues.New("looking for backup").
-				With("search_backup_id", backupID).
-				WithClues(ctx)
+			return clues.NewWC(ctx, "looking for backup").
+				With("search_backup_id", backupID)
 		}
 	}

View File

@@ -59,8 +59,7 @@ func Recovery(ctx context.Context, r any, namespace string) error {
 		}
 	}
-	err = clues.Wrap(err, "panic recovery"+inFile).
-		WithClues(ctx).
+	err = clues.WrapWC(ctx, err, "panic recovery"+inFile).
 		With("stacktrace", string(debug.Stack())).
 		WithTrace(2)
 	logger.CtxErr(ctx, err).Error(namespace + " panic")

View File

@@ -128,7 +128,7 @@ func (rrh *resetRetryHandler) Read(p []byte) (int, error) {
 		return read, io.EOF
 	}
-	return read, clues.Stack(err).WithClues(rrh.ctx).OrNil()
+	return read, clues.StackWC(rrh.ctx, err).OrNil()
 }
 logger.Ctx(rrh.ctx).Infow(
@@ -192,8 +192,7 @@ func (rrh *resetRetryHandler) reconnect(maxRetries int) (int, error) {
 		r, err = rrh.getter.Get(ctx, headers)
 		if err != nil {
-			err = clues.Wrap(err, "retrying connection").
-				WithClues(ctx).
+			err = clues.WrapWC(ctx, err, "retrying connection").
 				With("attempt_num", attempts)
 			continue
@@ -211,8 +210,7 @@ func (rrh *resetRetryHandler) reconnect(maxRetries int) (int, error) {
 		if skip > 0 {
 			_, err = io.CopyN(io.Discard, rrh.innerReader, skip)
 			if err != nil {
-				err = clues.Wrap(err, "seeking to correct offset").
-					WithClues(ctx).
+				err = clues.WrapWC(ctx, err, "seeking to correct offset").
 					With("attempt_num", attempts)
 			}
 		}

View File

@@ -259,10 +259,9 @@ func (i *lazyItemWithInfo) Info() (details.ItemInfo, error) {
 	defer i.mu.Unlock()
 	if i.delInFlight {
-		return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx)
+		return details.ItemInfo{}, clues.StackWC(i.ctx, ErrNotFound)
 	} else if i.info == nil {
-		return details.ItemInfo{}, clues.New("requesting ItemInfo before data retrieval").
-			WithClues(i.ctx)
+		return details.ItemInfo{}, clues.NewWC(i.ctx, "requesting ItemInfo before data retrieval")
 	}
 	return *i.info, nil

View File

@@ -107,7 +107,7 @@ func NewBus(ctx context.Context, s storage.Storage, tenID string, co control.Opt
 		})
 		if err != nil {
-			return Bus{}, clues.Wrap(err, "configuring event bus").WithClues(ctx)
+			return Bus{}, clues.WrapWC(ctx, err, "configuring event bus")
 		}
 	}

View File

@@ -134,7 +134,7 @@ func (b *baseFinder) getBackupModel(
 	bup, err := b.bg.GetBackup(ctx, model.StableID(bID))
 	if err != nil {
-		return nil, clues.Stack(err).WithClues(ctx)
+		return nil, clues.StackWC(ctx, err)
 	}
 	return bup, nil

View File

@@ -106,12 +106,12 @@ func (w *conn) Initialize(
 	cfg, err := w.storage.CommonConfig()
 	if err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	rOpts := retention.NewOpts()
 	if err := rOpts.Set(retentionOpts); err != nil {
-		return clues.Wrap(err, "setting retention configuration").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "setting retention configuration")
 	}
 	blobCfg, _, err := rOpts.AsConfigs(ctx)
@@ -127,10 +127,10 @@ func (w *conn) Initialize(
 	if err = repo.Initialize(ctx, bst, &kopiaOpts, cfg.CorsoPassphrase); err != nil {
 		if errors.Is(err, repo.ErrAlreadyInitialized) {
-			return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
+			return clues.StackWC(ctx, ErrorRepoAlreadyExists, err)
 		}
-		return clues.Wrap(err, "initializing repo").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "initializing repo")
 	}
 	err = w.commonConnect(
@@ -146,7 +146,7 @@ func (w *conn) Initialize(
 	}
 	if err := w.setDefaultConfigValues(ctx); err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	// Calling with all parameters here will set extend object locks for
@@ -164,7 +164,7 @@ func (w *conn) Connect(ctx context.Context, opts repository.Options, repoNameHas
 	cfg, err := w.storage.CommonConfig()
 	if err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	return w.commonConnect(
@@ -210,11 +210,11 @@ func (w *conn) commonConnect(
 		bst,
 		password,
 		kopiaOpts); err != nil {
-		return clues.Wrap(err, "connecting to kopia repo").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "connecting to kopia repo")
 	}
 	if err := w.open(ctx, cfgFile, password); err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	return nil
@@ -231,7 +231,7 @@ func blobStoreByProvider(
 	case storage.ProviderFilesystem:
 		return filesystemStorage(ctx, opts, s)
 	default:
-		return nil, clues.New("storage provider details are required").WithClues(ctx)
+		return nil, clues.NewWC(ctx, "storage provider details are required")
 	}
 }
@@ -259,7 +259,7 @@ func (w *conn) close(ctx context.Context) error {
 	w.Repository = nil
 	if err != nil {
-		return clues.Wrap(err, "closing repository connection").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "closing repository connection")
 	}
 	return nil
@@ -274,7 +274,7 @@ func (w *conn) open(ctx context.Context, configPath, password string) error {
 	// TODO(ashmrtnz): issue #75: nil here should be storage.ConnectionOptions().
 	rep, err := repo.Open(ctx, configPath, password, nil)
 	if err != nil {
-		return clues.Wrap(err, "opening repository connection").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "opening repository connection")
 	}
 	w.Repository = rep
@@ -332,7 +332,7 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {
 	// compressor was given.
 	comp := compression.Name(compressor)
 	if err := checkCompressor(comp); err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	p, err := w.getGlobalPolicyOrEmpty(ctx)
@@ -342,7 +342,7 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {
 	changed, err := updateCompressionOnPolicy(compressor, p)
 	if err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	if !changed {
@@ -409,7 +409,7 @@ func (w *conn) getPolicyOrEmpty(ctx context.Context, si snapshot.SourceInfo) (*p
 			return &policy.Policy{}, nil
 		}
-		return nil, clues.Wrap(err, "getting backup policy").With("source_info", si).WithClues(ctx)
+		return nil, clues.WrapWC(ctx, err, "getting backup policy").With("source_info", si)
 	}
 	return p, nil
@@ -433,16 +433,16 @@ func (w *conn) writePolicy(
 	ctx = clues.Add(ctx, "source_info", si)
 	writeOpts := repo.WriteSessionOptions{Purpose: purpose}
-	ctr := func(innerCtx context.Context, rw repo.RepositoryWriter) error {
+	ctr := func(ictx context.Context, rw repo.RepositoryWriter) error {
 		if err := policy.SetPolicy(ctx, rw, si, p); err != nil {
-			return clues.Stack(err).WithClues(innerCtx)
+			return clues.StackWC(ictx, err)
 		}
 		return nil
 	}
 	if err := repo.WriteSession(ctx, w.Repository, writeOpts, ctr); err != nil {
-		return clues.Wrap(err, "updating policy").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "updating policy")
 	}
 	return nil
@@ -470,12 +470,12 @@ func (w *conn) setRetentionParameters(
 	// it acts like we passed in only the duration and returns an error about
 	// having to set both. Return a clearer error here instead.
 	if ptr.Val(rrOpts.Mode) == repository.NoRetention && ptr.Val(rrOpts.Duration) != 0 {
-		return clues.New("duration must be 0 if rrOpts is disabled").WithClues(ctx)
+		return clues.NewWC(ctx, "duration must be 0 if rrOpts is disabled")
 	}
 	dr, ok := w.Repository.(repo.DirectRepository)
 	if !ok {
-		return clues.New("getting handle to repo").WithClues(ctx)
+		return clues.NewWC(ctx, "getting handle to repo")
 	}
 	blobCfg, params, err := getRetentionConfigs(ctx, dr)
@@ -485,7 +485,7 @@ func (w *conn) setRetentionParameters(
 	opts := retention.OptsFromConfigs(*blobCfg, *params)
 	if err := opts.Set(rrOpts); err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	return clues.Stack(persistRetentionConfigs(ctx, dr, opts)).OrNil()
@@ -497,12 +497,12 @@ func getRetentionConfigs(
 ) (*format.BlobStorageConfiguration, *maintenance.Params, error) {
 	blobCfg, err := dr.FormatManager().BlobCfgBlob()
 	if err != nil {
-		return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx)
+		return nil, nil, clues.WrapWC(ctx, err, "getting storage config")
 	}
 	params, err := maintenance.GetParams(ctx, dr)
 	if err != nil {
-		return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx)
+		return nil, nil, clues.WrapWC(ctx, err, "getting maintenance config")
 	}
 	return &blobCfg, params, nil
@@ -525,19 +525,21 @@ func persistRetentionConfigs(
 	mp, err := dr.FormatManager().GetMutableParameters()
 	if err != nil {
-		return clues.Wrap(err, "getting mutable parameters").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "getting mutable parameters")
 	}
 	requiredFeatures, err := dr.FormatManager().RequiredFeatures()
 	if err != nil {
-		return clues.Wrap(err, "getting required features").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "getting required features")
 	}
 	// Must be the case that only blob changed.
 	if !opts.ParamsChanged() {
-		return clues.Wrap(
+		return clues.WrapWC(
+			ctx,
 			dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
-			"persisting storage config").WithClues(ctx).OrNil()
+			"persisting storage config").
+			OrNil()
 	}
 	// Both blob and maintenance changed. A DirectWriteSession is required to
@@ -552,20 +554,21 @@ func persistRetentionConfigs(
 		// Set the maintenance config first as we can bail out of the write
 		// session later.
 		if err := maintenance.SetParams(ctx, dw, &params); err != nil {
-			return clues.Wrap(err, "maintenance config").
-				WithClues(ctx)
+			return clues.WrapWC(ctx, err, "maintenance config")
 		}
 		if !opts.BlobChanged() {
 			return nil
 		}
-		return clues.Wrap(
+		return clues.WrapWC(
+			ctx,
 			dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
-			"storage config").WithClues(ctx).OrNil()
+			"storage config").
+			OrNil()
 	})
-	return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil()
+	return clues.WrapWC(ctx, err, "persisting config changes").OrNil()
 }
 func (w *conn) LoadSnapshot(
@@ -574,7 +577,7 @@ func (w *conn) LoadSnapshot(
 ) (*snapshot.Manifest, error) {
 	man, err := snapshot.LoadSnapshot(ctx, w.Repository, id)
 	if err != nil {
-		return nil, clues.Stack(err).WithClues(ctx)
+		return nil, clues.StackWC(ctx, err)
 	}
 	return man, nil
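
Several hunks in this file lean on `OrNil` to wrap a possibly-nil error in a single expression; `WrapWC` keeps that chain intact. A small sketch of the pattern (hypothetical setter; mirrors the `persistRetentionConfigs` hunks above):

```go
package example

import (
	"context"

	"github.com/alcionai/clues"
)

func persist(ctx context.Context, set func(context.Context) error) error {
	// WrapWC always builds an error value, even when set returns nil;
	// OrNil collapses the nil case back to a plain nil return.
	return clues.WrapWC(ctx, set(ctx), "persisting storage config").OrNil()
}
```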

View File

@@ -44,8 +44,7 @@ func (kdc *kopiaDataCollection) Items(
 	for _, item := range kdc.items {
 		s, err := kdc.FetchItemByName(ctx, item)
 		if err != nil {
-			el.AddRecoverable(ctx, clues.Wrap(err, "fetching item").
-				WithClues(ctx).
+			el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "fetching item").
 				Label(fault.LabelForceNoBackupCreation))
 			continue
@@ -87,7 +86,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
 	}
 	if len(name) == 0 {
-		return nil, clues.Wrap(ErrNoRestorePath, "unknown item").WithClues(ctx)
+		return nil, clues.WrapWC(ctx, ErrNoRestorePath, "unknown item")
 	}
 	e, err := kdc.dir.Child(ctx, encodeAsPath(name))
@@ -96,12 +95,12 @@ func (kdc kopiaDataCollection) FetchItemByName(
 			err = clues.Stack(data.ErrNotFound, err)
 		}
-		return nil, clues.Wrap(err, "getting item").WithClues(ctx)
+		return nil, clues.WrapWC(ctx, err, "getting item")
 	}
 	f, ok := e.(fs.File)
 	if !ok {
-		return nil, clues.New("object is not a file").WithClues(ctx)
+		return nil, clues.NewWC(ctx, "object is not a file")
 	}
 	size := f.Size() - int64(readers.VersionFormatSize)
@@ -117,19 +116,18 @@ func (kdc kopiaDataCollection) FetchItemByName(
 	r, err := f.Open(ctx)
 	if err != nil {
-		return nil, clues.Wrap(err, "opening file").WithClues(ctx)
+		return nil, clues.WrapWC(ctx, err, "opening file")
 	}
 	// TODO(ashmrtn): Remove this when individual services implement checks for
 	// version and deleted items.
 	rr, err := readers.NewVersionedRestoreReader(r)
 	if err != nil {
-		return nil, clues.Stack(err).WithClues(ctx)
+		return nil, clues.StackWC(ctx, err)
 	}
 	if rr.Format().Version != kdc.expectedVersion {
-		return nil, clues.New("unexpected data format").
-			WithClues(ctx).
+		return nil, clues.NewWC(ctx, "unexpected data format").
 			With(
 				"read_version", rr.Format().Version,
 				"expected_version", kdc.expectedVersion)
@@ -138,8 +136,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
 	// This is a conservative check, but we shouldn't be seeing items that were
 	// deleted in flight during restores because there's no way to select them.
 	if rr.Format().DelInFlight {
-		return nil, clues.New("selected item marked as deleted in flight").
-			WithClues(ctx)
+		return nil, clues.NewWC(ctx, "selected item marked as deleted in flight")
 	}
 	return &kopiaDataStream{

View File

@@ -18,7 +18,7 @@ func filesystemStorage(
 ) (blob.Storage, error) {
 	fsCfg, err := s.ToFilesystemConfig()
 	if err != nil {
-		return nil, clues.Stack(err).WithClues(ctx)
+		return nil, clues.StackWC(ctx, err)
 	}
 	opts := filesystem.Options{
@@ -27,7 +27,7 @@ func filesystemStorage(
 	store, err := filesystem.New(ctx, &opts, true)
 	if err != nil {
-		return nil, clues.Stack(err).WithClues(ctx)
+		return nil, clues.StackWC(ctx, err)
 	}
 	return store, nil

View File

@@ -114,8 +114,7 @@ func (mc *mergeCollection) FetchItemByName(
 		if err == nil {
 			return s, nil
 		} else if err != nil && !errors.Is(err, data.ErrNotFound) {
-			return nil, clues.Wrap(err, "fetching from merged collection").
-				WithClues(ictx)
+			return nil, clues.WrapWC(ctx, err, "fetching from merged collection")
 		}
 	}

View File

@@ -125,7 +125,7 @@ func putInner(
 	create bool,
 ) error {
 	if !s.Valid() {
-		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	base := m.Base()
@@ -136,13 +136,13 @@ func putInner(
 	tmpTags, err := tagsForModelWithID(s, base.ID, base.ModelVersion, base.Tags)
 	if err != nil {
 		// Will be wrapped at a higher layer.
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	id, err := w.PutManifest(ctx, tmpTags, m)
 	if err != nil {
 		// Will be wrapped at a higher layer.
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	base.ModelStoreID = id
@@ -167,16 +167,16 @@ func (ms *ModelStore) Put(
 		ctx,
 		ms.c,
 		repo.WriteSessionOptions{Purpose: "ModelStorePut"},
-		func(innerCtx context.Context, w repo.RepositoryWriter) error {
-			err := putInner(innerCtx, w, s, m, true)
+		func(ictx context.Context, w repo.RepositoryWriter) error {
+			err := putInner(ictx, w, s, m, true)
 			if err != nil {
-				return clues.Stack(err).WithClues(innerCtx)
+				return clues.StackWC(ictx, err)
 			}
 			return nil
 		})
 	if err != nil {
-		return clues.Wrap(err, "putting model").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "putting model")
 	}
 	return nil
@@ -237,21 +237,21 @@ func (ms *ModelStore) GetIDsForType(
 	tags map[string]string,
 ) ([]*model.BaseModel, error) {
 	if !s.Valid() {
-		return nil, clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return nil, clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	if _, ok := tags[stableIDKey]; ok {
-		return nil, clues.Stack(errBadTagKey).WithClues(ctx)
+		return nil, clues.StackWC(ctx, errBadTagKey)
 	}
 	tmpTags, err := tagsForModel(s, tags)
 	if err != nil {
-		return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
+		return nil, clues.WrapWC(ctx, err, "getting model metadata")
 	}
 	metadata, err := ms.c.FindManifests(ctx, tmpTags)
 	if err != nil {
-		return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
+		return nil, clues.WrapWC(ctx, err, "getting model metadata")
 	}
 	res := make([]*model.BaseModel, 0, len(metadata))
@@ -259,7 +259,7 @@ func (ms *ModelStore) GetIDsForType(
 	for _, m := range metadata {
 		bm, err := ms.baseModelFromMetadata(m)
 		if err != nil {
-			return nil, clues.Wrap(err, "parsing model metadata").WithClues(ctx)
+			return nil, clues.WrapWC(ctx, err, "parsing model metadata")
 		}
 		res = append(res, bm)
@@ -277,30 +277,30 @@ func (ms *ModelStore) getModelStoreID(
 	id model.StableID,
 ) (manifest.ID, error) {
 	if !s.Valid() {
-		return "", clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return "", clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	if len(id) == 0 {
-		return "", clues.Stack(errNoStableID).WithClues(ctx)
+		return "", clues.StackWC(ctx, errNoStableID)
 	}
 	tags := map[string]string{stableIDKey: string(id)}
 	metadata, err := ms.c.FindManifests(ctx, tags)
 	if err != nil {
-		return "", clues.Wrap(err, "getting ModelStoreID").WithClues(ctx)
+		return "", clues.WrapWC(ctx, err, "getting ModelStoreID")
 	}
 	if len(metadata) == 0 {
-		return "", clues.Wrap(data.ErrNotFound, "getting ModelStoreID").WithClues(ctx)
+		return "", clues.WrapWC(ctx, data.ErrNotFound, "getting ModelStoreID")
 	}
 	if len(metadata) != 1 {
-		return "", clues.New("multiple models with same StableID").WithClues(ctx)
+		return "", clues.NewWC(ctx, "multiple models with same StableID")
 	}
 	if metadata[0].Labels[manifest.TypeLabelKey] != s.String() {
-		return "", clues.Stack(errModelTypeMismatch).WithClues(ctx)
+		return "", clues.StackWC(ctx, errModelTypeMismatch)
 	}
 	return metadata[0].ID, nil
@@ -316,7 +316,7 @@ func (ms *ModelStore) Get(
 	m model.Model,
 ) error {
 	if !s.Valid() {
-		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	modelID, err := ms.getModelStoreID(ctx, s, id)
@@ -337,11 +337,11 @@ func (ms *ModelStore) GetWithModelStoreID(
 	m model.Model,
 ) error {
 	if !s.Valid() {
-		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	if len(id) == 0 {
-		return clues.Stack(errNoModelStoreID).WithClues(ctx)
+		return clues.StackWC(ctx, errNoModelStoreID)
 	}
 	metadata, err := ms.c.GetManifest(ctx, id, m)
@@ -350,18 +350,17 @@ func (ms *ModelStore) GetWithModelStoreID(
 			err = data.ErrNotFound
 		}
-		return clues.Wrap(err, "getting model data").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "getting model data")
 	}
 	mdlbl := metadata.Labels[manifest.TypeLabelKey]
 	if mdlbl != s.String() {
-		return clues.Stack(errModelTypeMismatch).
-			WithClues(ctx).
+		return clues.StackWC(ctx, errModelTypeMismatch).
 			With("expected_label", s, "got_label", mdlbl)
 	}
 	if err := ms.populateBaseModelFromMetadata(m.Base(), metadata); err != nil {
-		return clues.Wrap(err, "getting model by ID").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "getting model by ID")
 	}
 	return nil
@@ -378,30 +377,28 @@ func (ms *ModelStore) checkPrevModelVersion(
 	b *model.BaseModel,
 ) error {
 	if !s.Valid() {
-		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	id, err := ms.getModelStoreID(ctx, s, b.ID)
 	if err != nil {
-		return clues.Stack(err).WithClues(ctx)
+		return clues.StackWC(ctx, err)
 	}
 	// We actually got something back during our lookup.
 	meta, err := ms.c.GetManifest(ctx, id, nil)
 	if err != nil {
-		return clues.Wrap(err, "getting previous model version").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "getting previous model version")
 	}
 	if meta.ID != b.ModelStoreID {
-		return clues.New("updated model has different ModelStoreID").
-			WithClues(ctx).
+		return clues.NewWC(ctx, "updated model has different ModelStoreID").
 			With("expected_id", meta.ID, "model_store_id", b.ModelStoreID)
 	}
 	mdlbl := meta.Labels[manifest.TypeLabelKey]
 	if mdlbl != s.String() {
-		return clues.New("updated model has different model type").
-			WithClues(ctx).
+		return clues.NewWC(ctx, "updated model has different model type").
 			With("expected_label", s, "got_label", mdlbl)
 	}
@@ -420,12 +417,12 @@ func (ms *ModelStore) Update(
 	m model.Model,
 ) error {
 	if !s.Valid() {
-		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	base := m.Base()
 	if len(base.ModelStoreID) == 0 {
-		return clues.Stack(errNoModelStoreID).WithClues(ctx)
+		return clues.StackWC(ctx, errNoModelStoreID)
 	}
 	base.ModelVersion = ms.modelVersion
@@ -468,13 +465,13 @@ func (ms *ModelStore) Update(
 			// collected the next time kopia maintenance is run.
 			innerErr = w.DeleteManifest(innerCtx, oldID)
 			if innerErr != nil {
-				return clues.Stack(innerErr).WithClues(ctx)
+				return clues.StackWC(ctx, innerErr)
 			}
 			return nil
 		})
 	if err != nil {
-		return clues.Wrap(err, "updating model").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "updating model")
 	}
 	return nil
@@ -485,7 +482,7 @@ func (ms *ModelStore) Update(
 // have the same StableID.
 func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.StableID) error {
 	if !s.Valid() {
-		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+		return clues.StackWC(ctx, errUnrecognizedSchema)
 	}
 	latest, err := ms.getModelStoreID(ctx, s, id)
@@ -511,14 +508,14 @@ func (ms *ModelStore) DeleteWithModelStoreIDs(
 	ids ...manifest.ID,
 ) error {
 	opts := repo.WriteSessionOptions{Purpose: "ModelStoreDelete"}
-	cb := func(innerCtx context.Context, w repo.RepositoryWriter) error {
+	cb := func(ictx context.Context, w repo.RepositoryWriter) error {
 		for _, id := range ids {
 			if len(id) == 0 {
-				return clues.Stack(errNoModelStoreID).WithClues(ctx)
+				return clues.StackWC(ictx, errNoModelStoreID)
 			}
-			if err := w.DeleteManifest(innerCtx, id); err != nil {
-				return clues.Stack(err).WithClues(innerCtx).With("model_store_id", id)
+			if err := w.DeleteManifest(ictx, id); err != nil {
+				return clues.StackWC(ictx, err).With("model_store_id", id)
 			}
 		}
@@ -526,7 +523,7 @@ func (ms *ModelStore) DeleteWithModelStoreIDs(
 	}
 	if err := repo.WriteSession(ctx, ms.c, opts, cb); err != nil {
-		return clues.Wrap(err, "deleting model").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "deleting model")
 	}
 	return nil

View File

@@ -40,9 +40,10 @@ func (r *Opts) AsConfigs(
 	// Check the new config is valid.
 	if r.blobCfg.IsRetentionEnabled() {
 		if err := maintenance.CheckExtendRetention(ctx, r.blobCfg, &r.params); err != nil {
-			return format.BlobStorageConfiguration{}, maintenance.Params{}, clues.Wrap(
+			return format.BlobStorageConfiguration{}, maintenance.Params{}, clues.WrapWC(
+				ctx,
 				err,
-				"invalid retention config").WithClues(ctx)
+				"invalid retention config")
 		}
 	}

View File

@@ -22,7 +22,7 @@ func s3BlobStorage(
 ) (blob.Storage, error) {
 	cfg, err := s.ToS3Config()
 	if err != nil {
-		return nil, clues.Stack(err).WithClues(ctx)
+		return nil, clues.StackWC(ctx, err)
 	}
 	endpoint := defaultS3Endpoint
@@ -49,7 +49,7 @@ func s3BlobStorage(
 	store, err := s3.New(ctx, &opts, false)
 	if err != nil {
-		return nil, clues.Stack(err).WithClues(ctx)
+		return nil, clues.StackWC(ctx, err)
 	}
 	return store, nil

View File

@@ -115,8 +115,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 	// never had to materialize their details in-memory.
 	if d.infoer == nil || d.cached {
 		if d.prevPath == nil {
-			cp.errs.AddRecoverable(ctx, clues.New("finished file sourced from previous backup with no previous path").
-				WithClues(ctx).
+			cp.errs.AddRecoverable(ctx, clues.NewWC(ctx, "finished file sourced from previous backup with no previous path").
 				Label(fault.LabelForceNoBackupCreation))
 			return
@@ -131,8 +130,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 			d.repoPath,
 			d.locationPath)
 		if err != nil {
-			cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to merge list").
-				WithClues(ctx).
+			cp.errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "adding finished file to merge list").
 				Label(fault.LabelForceNoBackupCreation))
 		}
@@ -145,27 +143,23 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 		// adding it to details since there's no data for it.
 		return
 	} else if err != nil {
-		cp.errs.AddRecoverable(ctx, clues.Wrap(err, "getting ItemInfo").
-			WithClues(ctx).
+		cp.errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "getting ItemInfo").
 			Label(fault.LabelForceNoBackupCreation))
 		return
 	} else if !ptr.Val(d.modTime).Equal(info.Modified()) {
-		cp.errs.AddRecoverable(ctx, clues.New("item modTime mismatch").
-			WithClues(ctx).
+		cp.errs.AddRecoverable(ctx, clues.NewWC(ctx, "item modTime mismatch").
 			Label(fault.LabelForceNoBackupCreation))
 		return
 	} else if info.Modified().IsZero() {
-		cp.errs.AddRecoverable(ctx, clues.New("zero-valued mod time").
-			WithClues(ctx).
+		cp.errs.AddRecoverable(ctx, clues.NewWC(ctx, "zero-valued mod time").
 			Label(fault.LabelForceNoBackupCreation))
 	}
 	err = cp.deets.Add(d.repoPath, d.locationPath, info)
 	if err != nil {
-		cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to details").
-			WithClues(ctx).
+		cp.errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "adding finished file to details").
 			Label(fault.LabelForceNoBackupCreation))
 		return
@@ -275,7 +269,7 @@ func collectionEntries(
 	for {
 		select {
 		case <-ctx.Done():
-			return seen, clues.Stack(ctx.Err()).WithClues(ctx)
+			return seen, clues.StackWC(ctx, ctx.Err())
 		case e, ok := <-items:
 			if !ok {
@@ -357,8 +351,7 @@ func collectionEntries(
 			if err != nil {
 				// Kopia's uploader swallows errors in most cases, so if we see
 				// something here it's probably a big issue and we should return.
-				return seen, clues.Wrap(err, "executing callback").
-					WithClues(ctx).
+				return seen, clues.WrapWC(ctx, err, "executing callback").
 					With("item_path", itemPath)
 			}
 		}
@@ -397,13 +390,12 @@ func streamBaseEntries(
 		ctx,
 		func(innerCtx context.Context, entry fs.Entry) error {
 			if err := innerCtx.Err(); err != nil {
-				return clues.Stack(err).WithClues(ctx)
+				return clues.StackWC(ctx, err)
 			}
 			entName, err := decodeElement(entry.Name())
 			if err != nil {
-				return clues.Wrap(err, "decoding entry name").
-					WithClues(ctx).
+				return clues.WrapWC(ctx, err, "decoding entry name").
 					With("entry_name", entry.Name())
 			}
@@ -421,14 +413,12 @@ func streamBaseEntries(
 				// LocationPath information associated with the directory.
 				newP, err := params.currentPath.Append(false, entName)
 				if err != nil {
-					return clues.Wrap(err, "getting current directory path").
-						WithClues(ctx)
+					return clues.WrapWC(ctx, err, "getting current directory path")
 				}
 				oldP, err := params.prevPath.Append(false, entName)
 				if err != nil {
-					return clues.Wrap(err, "getting previous directory path").
-						WithClues(ctx)
+					return clues.WrapWC(ctx, err, "getting previous directory path")
 				}
 				e := virtualfs.NewStreamingDirectory(
@@ -445,8 +435,7 @@ func streamBaseEntries(
 						globalExcludeSet,
 						progress))
-				return clues.Wrap(ctr(ctx, e), "executing callback on subdirectory").
-					WithClues(ctx).
+				return clues.WrapWC(ctx, ctr(ctx, e), "executing callback on subdirectory").
 					With("directory_path", newP).
 					OrNil()
 			}
@@ -467,8 +456,7 @@ func streamBaseEntries(
 			// For now assuming that item IDs don't need escaping.
 			itemPath, err := params.currentPath.AppendItem(entName)
 			if err != nil {
-				return clues.Wrap(err, "getting full item path for base entry").
-					WithClues(ctx)
+				return clues.WrapWC(ctx, err, "getting full item path for base entry")
 			}
 			// We need the previous path so we can find this item in the base snapshot's
@@ -477,8 +465,7 @@ func streamBaseEntries(
 			// to look for.
 			prevItemPath, err := params.prevPath.AppendItem(entName)
 			if err != nil {
-				return clues.Wrap(err, "getting previous full item path for base entry").
-					WithClues(ctx)
+				return clues.WrapWC(ctx, err, "getting previous full item path for base entry")
 			}
 			// Meta files aren't in backup details since it's the set of items the
@@ -502,16 +489,14 @@ func streamBaseEntries(
 			}
 			if err := ctr(ctx, entry); err != nil {
-				return clues.Wrap(err, "executing callback on item").
-					WithClues(ctx).
+				return clues.WrapWC(ctx, err, "executing callback on item").
 					With("item_path", itemPath)
 			}
 			return nil
 		})
 	if err != nil {
-		return clues.Wrap(err, "traversing items in base snapshot directory").
-			WithClues(ctx)
+		return clues.WrapWC(ctx, err, "traversing items in base snapshot directory")
 	}
 	return nil
@@ -534,8 +519,7 @@ func getStreamItemFunc(
 		// Return static entries in this directory first.
 		for _, d := range staticEnts {
 			if err := ctr(ctx, d); err != nil {
-				return clues.Wrap(err, "executing callback on static directory").
-					WithClues(ctx)
+				return clues.WrapWC(ctx, err, "executing callback on static directory")
 			}
 		}
@@ -763,15 +747,13 @@ func inflateCollectionTree(
 		switch s.State() {
 		case data.DeletedState:
 			if s.PreviousPath() == nil {
-				return nil, nil, clues.New("nil previous path on deleted collection").
-					WithClues(ictx)
+				return nil, nil, clues.NewWC(ictx, "nil previous path on deleted collection")
 			}
 			changedPaths = append(changedPaths, s.PreviousPath())
 			if p, ok := updatedPaths[s.PreviousPath().String()]; ok {
-				err := clues.New("multiple previous state changes").
-					WithClues(ictx).
+				err := clues.NewWC(ictx, "multiple previous state changes").
 					With("updated_path", p, "current_state", data.DeletedState)
 				logger.CtxErr(ictx, err).Error("previous path state collision")
@@ -788,8 +770,7 @@ func inflateCollectionTree(
 			changedPaths = append(changedPaths, s.PreviousPath())
 			if p, ok := updatedPaths[s.PreviousPath().String()]; ok {
-				err := clues.New("multiple previous state changes").
-					WithClues(ictx).
+				err := clues.NewWC(ictx, "multiple previous state changes").
 					With("updated_path", p, "current_state", data.MovedState)
 				logger.CtxErr(ictx, err).Error("previous path state collision")
@@ -809,15 +790,13 @@ func inflateCollectionTree(
 			// changed via one of the ancestor folders being moved. This catches the
 			// ancestor folder move.
 			if err := addMergeLocation(s, toMerge); err != nil {
-				return nil, nil, clues.Wrap(err, "adding merge location").
-					WithClues(ictx)
+				return nil, nil, clues.WrapWC(ictx, err, "adding merge location")
 			}
 		case data.NotMovedState:
 			p := s.PreviousPath().String()
 			if p, ok := updatedPaths[p]; ok {
-				err := clues.New("multiple previous state changes").
-					WithClues(ictx).
+				err := clues.NewWC(ictx, "multiple previous state changes").
 					With("updated_path", p, "current_state", data.NotMovedState)
 				logger.CtxErr(ictx, err).Error("previous path state collision")
@@ -833,19 +812,18 @@ func inflateCollectionTree(
 		}
 		if s.FullPath() == nil || len(s.FullPath().Elements()) == 0 {
-			return nil, nil, clues.New("no identifier for collection").WithClues(ictx)
+			return nil, nil, clues.NewWC(ictx, "no identifier for collection")
 		}
 		node := getTreeNode(roots, s.FullPath().Elements())
 		if node == nil {
-			return nil, nil, clues.New("getting tree node").WithClues(ictx)
+			return nil, nil, clues.NewWC(ictx, "getting tree node")
 		}
 		// Make sure there's only a single collection adding items for any given
 		// path in the new hierarchy.
 		if node.collection != nil {
-			return nil, nil, clues.New("multiple instances of collection").
-				WithClues(ictx)
+			return nil, nil, clues.NewWC(ictx, "multiple instances of collection")
 		}
 		node.collection = s
@@ -863,8 +841,7 @@ func inflateCollectionTree(
 		}
 		if node.collection != nil && node.collection.State() == data.NotMovedState {
-			err := clues.New("conflicting states for collection").
-				WithClues(ctx)
+			err := clues.NewWC(ctx, "conflicting states for collection")
 			logger.CtxErr(ctx, err).Error("adding node to tree")
 			if firstErr == nil {
@@ -947,7 +924,7 @@ func traverseBaseDir(
 		"expected_parent_dir_path", expectedDirPath)
 	if depth >= maxInflateTraversalDepth {
-		return clues.New("base snapshot tree too tall").WithClues(ctx)
+		return clues.NewWC(ctx, "base snapshot tree too tall")
 	}
 	// Wrapper base64 encodes all file and folder names to avoid issues with
@@ -955,8 +932,7 @@ func traverseBaseDir(
 	// from kopia we need to do the decoding here.
 	dirName, err := decodeElement(dir.Name())
 	if err != nil {
-		return clues.Wrap(err, "decoding base directory name").
-			WithClues(ctx).
+		return clues.WrapWC(ctx, err, "decoding base directory name").
 			With("dir_name", clues.Hide(dir.Name()))
 	}
@@ -1029,7 +1005,7 @@ func traverseBaseDir(
 				stats)
 		})
 		if err != nil {
-			return clues.Wrap(err, "traversing base directory").WithClues(ctx)
+			return clues.WrapWC(ctx, err, "traversing base directory")
 		}
 	} else {
 		stats.Inc(statPruned)
@@ -1049,7 +1025,7 @@ func traverseBaseDir(
 		// in the if-block though as that is an optimization.
 		node := getTreeNode(roots, currentPath.Elements())
 		if node == nil {
-			return clues.New("getting tree node").WithClues(ctx)
+			return clues.NewWC(ctx, "getting tree node")
 		}
 		// Now that we have the node we need to check if there is a collection
@@ -1075,12 +1051,12 @@ func traverseBaseDir(
 		curP, err := path.PrefixOrPathFromDataLayerPath(currentPath.String(), false)
 		if err != nil {
-			return clues.New("converting current path to path.Path").WithClues(ctx)
+			return clues.NewWC(ctx, "converting current path to path.Path")
 		}
 		oldP, err := path.PrefixOrPathFromDataLayerPath(oldDirPath.String(), false)
 		if err != nil {
-			return clues.New("converting old path to path.Path").WithClues(ctx)
+			return clues.NewWC(ctx, "converting old path to path.Path")
 		}
 		node.baseDir = dir
@@ -1159,12 +1135,12 @@ func inflateBaseTree(
 	root, err := loader.SnapshotRoot(base.ItemDataSnapshot)
 	if err != nil {
-		return clues.Wrap(err, "getting snapshot root directory").WithClues(ctx)
+		return clues.WrapWC(ctx, err, "getting snapshot root directory")
 	}
 	dir, ok := root.(fs.Directory)
 	if !ok {
-		return clues.New("snapshot root is not a directory").WithClues(ctx)
+		return clues.NewWC(ctx, "snapshot root is not a directory")
 	}
 	// For each subtree corresponding to the tuple
@@ -1178,7 +1154,7 @@ func inflateBaseTree(
 		subtreePath, err := r.SubtreePath()
 		if err != nil {
-			return clues.Wrap(err, "building subtree path").WithClues(ictx)
+			return clues.WrapWC(ictx, err, "building subtree path")
 		}
 		// We're starting from the root directory so don't need it in the path.
@@ -1191,12 +1167,12 @@ func inflateBaseTree(
 				continue
 			}
-			return clues.Wrap(err, "getting subtree root").WithClues(ictx)
+			return clues.WrapWC(ictx, err, "getting subtree root")
 		}
 		subtreeDir, ok := ent.(fs.Directory)
 		if !ok {
-			return clues.Wrap(err, "subtree root is not directory").WithClues(ictx)
+			return clues.WrapWC(ictx, err, "subtree root is not directory")
 		}
 		// This ensures that a migration on the directory prefix can complete.
@@ -1219,7 +1195,7 @@ func inflateBaseTree(
 			subtreeDir,
 			roots,
 			stats); err != nil {
-			return clues.Wrap(err, "traversing base snapshot").WithClues(ictx)
+			return clues.WrapWC(ictx, err, "traversing base snapshot")
 		}
 		logger.Ctx(ctx).Infow(
@@ -1278,7 +1254,7 @@ func inflateDirTree(
 	}
 	if len(roots) > 1 {
-		return nil, clues.New("multiple root directories").WithClues(ctx)
+		return nil, clues.NewWC(ctx, "multiple root directories")
 	}
 	var res fs.Directory

View File

@ -132,7 +132,7 @@ func (w *Wrapper) Close(ctx context.Context) error {
w.c = nil w.c = nil
if err != nil { if err != nil {
return clues.Wrap(err, "closing Wrapper").WithClues(ctx) return clues.WrapWC(ctx, err, "closing Wrapper")
} }
return nil return nil
@ -156,7 +156,7 @@ func (w Wrapper) ConsumeBackupCollections(
errs *fault.Bus, errs *fault.Bus,
) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) { ) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
if w.c == nil { if w.c == nil {
return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx) return nil, nil, nil, clues.StackWC(ctx, errNotConnected)
} }
ctx, end := diagnostics.Span(ctx, "kopia:consumeBackupCollections") ctx, end := diagnostics.Span(ctx, "kopia:consumeBackupCollections")
@ -304,7 +304,7 @@ func (w Wrapper) makeSnapshotWithRoot(
policyTree, err := policy.TreeForSourceWithOverride(innerCtx, w.c, si, errPolicy) policyTree, err := policy.TreeForSourceWithOverride(innerCtx, w.c, si, errPolicy)
if err != nil { if err != nil {
err = clues.Wrap(err, "get policy tree").WithClues(ctx) err = clues.WrapWC(ctx, err, "get policy tree")
logger.CtxErr(innerCtx, err).Error("building kopia backup") logger.CtxErr(innerCtx, err).Error("building kopia backup")
return err return err
@ -318,7 +318,7 @@ func (w Wrapper) makeSnapshotWithRoot(
man, err = u.Upload(innerCtx, root, policyTree, si, prevSnaps...) man, err = u.Upload(innerCtx, root, policyTree, si, prevSnaps...)
if err != nil { if err != nil {
err = clues.Wrap(err, "uploading data").WithClues(ctx) err = clues.WrapWC(ctx, err, "uploading data")
logger.CtxErr(innerCtx, err).Error("uploading kopia backup") logger.CtxErr(innerCtx, err).Error("uploading kopia backup")
return err return err
@ -327,7 +327,7 @@ func (w Wrapper) makeSnapshotWithRoot(
man.Tags = tags man.Tags = tags
if _, err := snapshot.SaveSnapshot(innerCtx, rw, man); err != nil { if _, err := snapshot.SaveSnapshot(innerCtx, rw, man); err != nil {
err = clues.Wrap(err, "saving snapshot").WithClues(ctx) err = clues.WrapWC(ctx, err, "saving snapshot")
logger.CtxErr(innerCtx, err).Error("persisting kopia backup snapshot") logger.CtxErr(innerCtx, err).Error("persisting kopia backup snapshot")
return err return err
@ -338,7 +338,7 @@ func (w Wrapper) makeSnapshotWithRoot(
// Telling kopia to always flush may hide other errors if it fails while // Telling kopia to always flush may hide other errors if it fails while
// flushing the write session (hence logging above). // flushing the write session (hence logging above).
if err != nil { if err != nil {
return nil, clues.Wrap(err, "kopia backup").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "kopia backup")
} }
res := manifestToStats(man, progress, bc) res := manifestToStats(man, progress, bc)
@ -352,12 +352,12 @@ func (w Wrapper) getSnapshotRoot(
) (fs.Entry, error) { ) (fs.Entry, error) {
man, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID)) man, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID))
if err != nil { if err != nil {
return nil, clues.Wrap(err, "getting snapshot handle").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "getting snapshot handle")
} }
rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, man) rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, man)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "getting root directory").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "getting root directory")
} }
return rootDirEntry, nil return rootDirEntry, nil
@ -373,7 +373,7 @@ func getDir(
snapshotRoot fs.Entry, snapshotRoot fs.Entry,
) (fs.Directory, error) { ) (fs.Directory, error) {
if dirPath == nil { if dirPath == nil {
return nil, clues.Wrap(ErrNoRestorePath, "getting directory").WithClues(ctx) return nil, clues.WrapWC(ctx, ErrNoRestorePath, "getting directory")
} }
toGet := dirPath.PopFront() toGet := dirPath.PopFront()
@ -387,15 +387,15 @@ func getDir(
encodeElements(toGet.Elements()...)) encodeElements(toGet.Elements()...))
if err != nil { if err != nil {
if isErrEntryNotFound(err) { if isErrEntryNotFound(err) {
err = clues.Stack(data.ErrNotFound, err).WithClues(ctx) err = clues.StackWC(ctx, data.ErrNotFound, err)
} }
return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "getting nested object handle")
} }
f, ok := e.(fs.Directory) f, ok := e.(fs.Directory)
if !ok { if !ok {
return nil, clues.New("requested object is not a directory").WithClues(ctx) return nil, clues.NewWC(ctx, "requested object is not a directory")
} }
return f, nil return f, nil
@ -452,8 +452,7 @@ func loadDirsAndItems(
dir, err := getDir(ictx, dirItems.dir, snapshotRoot) dir, err := getDir(ictx, dirItems.dir, snapshotRoot)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "loading storage directory"). el.AddRecoverable(ctx, clues.WrapWC(ictx, err, "loading storage directory").
WithClues(ictx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
continue continue
@ -468,8 +467,7 @@ func loadDirsAndItems(
} }
if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil { if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "adding collection to merge collection"). el.AddRecoverable(ctx, clues.WrapWC(ictx, err, "adding collection to merge collection").
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
continue continue
@ -498,14 +496,14 @@ func (w Wrapper) ProduceRestoreCollections(
defer end() defer end()
if len(paths) == 0 { if len(paths) == 0 {
return nil, clues.Stack(ErrNoRestorePath).WithClues(ctx) return nil, clues.StackWC(ctx, ErrNoRestorePath)
} }
// Used later on, but less confusing to follow error propagation if we just // Used later on, but less confusing to follow error propagation if we just
// load it here. // load it here.
snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID) snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "loading snapshot root").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "loading snapshot root")
} }
var ( var (
@ -530,8 +528,7 @@ func (w Wrapper) ProduceRestoreCollections(
parentStoragePath, err := itemPaths.StoragePath.Dir() parentStoragePath, err := itemPaths.StoragePath.Dir()
if err != nil { if err != nil {
el.AddRecoverable(ictx, clues.Wrap(err, "getting storage directory path"). el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "getting storage directory path").
WithClues(ictx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
continue continue
@ -570,7 +567,7 @@ func (w Wrapper) ProduceRestoreCollections(
// then load the items from the directory. // then load the items from the directory.
res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs) res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "loading items").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "loading items")
} }
return res, el.Failure() return res, el.Failure()
@ -598,12 +595,12 @@ func (w Wrapper) RepoMaintenance(
) error { ) error {
kopiaSafety, err := translateSafety(opts.Safety) kopiaSafety, err := translateSafety(opts.Safety)
if err != nil { if err != nil {
return clues.Wrap(err, "identifying safety level").WithClues(ctx) return clues.WrapWC(ctx, err, "identifying safety level")
} }
mode, err := translateMode(opts.Type) mode, err := translateMode(opts.Type)
if err != nil { if err != nil {
return clues.Wrap(err, "identifying maintenance mode").WithClues(ctx) return clues.WrapWC(ctx, err, "identifying maintenance mode")
} }
currentOwner := w.c.ClientOptions().UsernameAtHost() currentOwner := w.c.ClientOptions().UsernameAtHost()
@ -633,7 +630,7 @@ func (w Wrapper) RepoMaintenance(
dr, ok := w.c.Repository.(repo.DirectRepository) dr, ok := w.c.Repository.(repo.DirectRepository)
if !ok { if !ok {
return clues.New("unable to get valid handle to repo").WithClues(ctx) return clues.NewWC(ctx, "unable to get valid handle to repo")
} }
// Below write session options pulled from kopia's CLI code that runs // Below write session options pulled from kopia's CLI code that runs
@ -647,7 +644,7 @@ func (w Wrapper) RepoMaintenance(
func(ctx context.Context, dw repo.DirectRepositoryWriter) error { func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
params, err := maintenance.GetParams(ctx, w.c) params, err := maintenance.GetParams(ctx, w.c)
if err != nil { if err != nil {
return clues.Wrap(err, "getting maintenance user@host").WithClues(ctx) return clues.WrapWC(ctx, err, "getting maintenance user@host")
} }
// Need to do some fixup here as the user/host may not have been set. // Need to do some fixup here as the user/host may not have been set.
@ -658,8 +655,7 @@ func (w Wrapper) RepoMaintenance(
clues.Hide(currentOwner)) clues.Hide(currentOwner))
if err := w.setMaintenanceParams(ctx, dw, params, currentOwner); err != nil { if err := w.setMaintenanceParams(ctx, dw, params, currentOwner); err != nil {
return clues.Wrap(err, "updating maintenance parameters"). return clues.WrapWC(ctx, err, "updating maintenance parameters")
WithClues(ctx)
} }
} }
@ -669,7 +665,7 @@ func (w Wrapper) RepoMaintenance(
err = snapshotmaintenance.Run(ctx, dw, mode, opts.Force, kopiaSafety) err = snapshotmaintenance.Run(ctx, dw, mode, opts.Force, kopiaSafety)
if err != nil { if err != nil {
return clues.Wrap(err, "running kopia maintenance").WithClues(ctx) return clues.WrapWC(ctx, err, "running kopia maintenance")
} }
return nil return nil
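One detail in the maintenance hunks worth calling out: the owner string is packed via `clues.Hide`, so it travels with ctx and any WC-built error while staying masked in logs. A small sketch with a hypothetical owner value:

```go
package main

import (
	"context"
	"fmt"

	"github.com/alcionai/clues"
)

func main() {
	// Concealed values still ride along with errors built by the WC
	// constructors, but render masked when logged or printed.
	ctx := clues.Add(context.Background(), "current_owner", clues.Hide("user@host"))

	fmt.Println(clues.NewWC(ctx, "unable to get valid handle to repo"))
}
```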

View File

@ -54,7 +54,7 @@ func (ctrl *Controller) ProduceBackupCollections(
err := verifyBackupInputs(bpc.Selector, ctrl.IDNameLookup.IDs()) err := verifyBackupInputs(bpc.Selector, ctrl.IDNameLookup.IDs())
if err != nil { if err != nil {
return nil, nil, false, clues.Stack(err).WithClues(ctx) return nil, nil, false, clues.StackWC(ctx, err)
} }
var ( var (
@ -118,7 +118,7 @@ func (ctrl *Controller) ProduceBackupCollections(
canUsePreviousBackup = true canUsePreviousBackup = true
default: default:
return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx) return nil, nil, false, clues.Wrap(clues.NewWC(ctx, service.String()), "service not supported")
} }
for _, c := range colls { for _, c := range colls {
@ -152,7 +152,7 @@ func (ctrl *Controller) IsServiceEnabled(
return groups.IsServiceEnabled(ctx, ctrl.AC.Groups(), resourceOwner) return groups.IsServiceEnabled(ctx, ctrl.AC.Groups(), resourceOwner)
} }
return false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx) return false, clues.Wrap(clues.NewWC(ctx, service.String()), "service not supported")
} }
func verifyBackupInputs(sels selectors.Selector, cachedIDs []string) error { func verifyBackupInputs(sels selectors.Selector, cachedIDs []string) error {
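Note that the two controller sites above keep a nested form, `clues.Wrap(clues.NewWC(ctx, ...), "service not supported")`, rather than switching to `WrapWC`: the ctx values belong on the innermost error, and the outer `Wrap` only layers a message. Sketched with a hypothetical service string:

```go
package main

import (
	"context"
	"fmt"

	"github.com/alcionai/clues"
)

func unsupported(ctx context.Context, service string) error {
	// ctx attaches once, on the inner error; the outer Wrap adds only
	// the message, so no ctx values are duplicated across layers.
	return clues.Wrap(clues.NewWC(ctx, service), "service not supported")
}

func main() {
	ctx := clues.Add(context.Background(), "tenant_id", "t-1")
	fmt.Println(unsupported(ctx, "sharepoint"))
}
```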

View File

@ -309,8 +309,7 @@ func (oc *Collection) getDriveItemContent(
errs.AddRecoverable( errs.AddRecoverable(
ctx, ctx,
clues.Wrap(err, "downloading item content"). clues.WrapWC(ctx, err, "downloading item content").
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
// return err, not el.Err(), because the lazy reader needs to communicate to // return err, not el.Err(), because the lazy reader needs to communicate to
@ -508,8 +507,7 @@ func (lig *lazyItemGetter) GetData(
*lig.info, *lig.info,
lig.itemExtensionFactory) lig.itemExtensionFactory)
if err != nil { if err != nil {
err := clues.Wrap(err, "adding extensions"). err := clues.WrapWC(ctx, err, "adding extensions").
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation) Label(fault.LabelForceNoBackupCreation)
return nil, nil, false, err return nil, nil, false, err
@ -637,8 +635,7 @@ func (oc *Collection) streamDriveItem(
// permissions change does not update mod time. // permissions change does not update mod time.
time.Now()) time.Now())
if err != nil { if err != nil {
errs.AddRecoverable(ctx, clues.Stack(err). errs.AddRecoverable(ctx, clues.StackWC(ctx, err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
return return

View File

@ -177,7 +177,7 @@ func DeserializeMetadata(
for breakLoop := false; !breakLoop; { for breakLoop := false; !breakLoop; {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, nil, false, clues.Wrap(ctx.Err(), "deserializing previous backup metadata").WithClues(ctx) return nil, nil, false, clues.WrapWC(ctx, ctx.Err(), "deserializing previous backup metadata")
case item, ok := <-items: case item, ok := <-items:
if !ok { if !ok {
@ -212,7 +212,7 @@ func DeserializeMetadata(
// these cases. We can make the logic for deciding when to continue vs. // these cases. We can make the logic for deciding when to continue vs.
// when to fail less strict in the future if needed. // when to fail less strict in the future if needed.
if err != nil { if err != nil {
errs.Fail(clues.Stack(err).WithClues(ictx)) errs.Fail(clues.StackWC(ictx, err))
return map[string]string{}, map[string]map[string]string{}, false, nil return map[string]string{}, map[string]map[string]string{}, false, nil
} }
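`DeserializeMetadata` exercises the other half of the fault API: `Fail` records a non-recoverable failure on the bus, while the function still returns zero values so the caller can fall back to a full, non-incremental backup. Roughly, with a hypothetical `decode` step:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/alcionai/clues"
	"github.com/alcionai/corso/src/pkg/fault"
)

func decode() error { return errors.New("bad json") }

func deserialize(ctx context.Context, errs *fault.Bus) map[string]string {
	ictx := clues.Add(ctx, "category", "contacts")

	if err := decode(); err != nil {
		// Fail marks the bus's non-recoverable failure; the zeroed return
		// lets the caller degrade gracefully instead of aborting.
		errs.Fail(clues.StackWC(ictx, err))
		return map[string]string{}
	}

	return map[string]string{"folder-id": "prev/path"}
}

func main() {
	errs := fault.New(false)
	_ = deserialize(context.Background(), errs)
	fmt.Println(errs.Failure())
}
```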
@ -408,7 +408,7 @@ func (c *Collections) Get(
p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID) p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID)
if err != nil { if err != nil {
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx) return nil, false, clues.WrapWC(ictx, err, "making exclude prefix")
} }
ssmb.Add(p.String(), excludedItemIDs) ssmb.Add(p.String(), excludedItemIDs)
@ -433,7 +433,7 @@ func (c *Collections) Get(
prevPath, err := path.FromDataLayerPath(p, false) prevPath, err := path.FromDataLayerPath(p, false)
if err != nil { if err != nil {
err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p) err = clues.WrapWC(ictx, err, "invalid previous path").With("deleted_path", p)
return nil, false, err return nil, false, err
} }
@ -449,7 +449,7 @@ func (c *Collections) Get(
true, true,
nil) nil)
if err != nil { if err != nil {
return nil, false, clues.Wrap(err, "making collection").WithClues(ictx) return nil, false, clues.WrapWC(ictx, err, "making collection")
} }
c.CollectionMap[driveID][fldID] = col c.CollectionMap[driveID][fldID] = col
@ -471,7 +471,7 @@ func (c *Collections) Get(
for driveID := range driveTombstones { for driveID := range driveTombstones {
prevDrivePath, err := c.handler.PathPrefix(c.tenantID, driveID) prevDrivePath, err := c.handler.PathPrefix(c.tenantID, driveID)
if err != nil { if err != nil {
return nil, false, clues.Wrap(err, "making drive tombstone for previous path").WithClues(ctx) return nil, false, clues.WrapWC(ctx, err, "making drive tombstone for previous path")
} }
coll, err := NewCollection( coll, err := NewCollection(
@ -486,7 +486,7 @@ func (c *Collections) Get(
true, true,
nil) nil)
if err != nil { if err != nil {
return nil, false, clues.Wrap(err, "making drive tombstone").WithClues(ctx) return nil, false, clues.WrapWC(ctx, err, "making drive tombstone")
} }
collections = append(collections, coll) collections = append(collections, coll)
@ -814,13 +814,14 @@ func (c *Collections) processItem(
itemID = ptr.Val(item.GetId()) itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName()) itemName = ptr.Val(item.GetName())
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
ictx = clues.Add(
ctx,
"item_id", itemID,
"item_name", clues.Hide(itemName),
"item_is_folder", isFolder)
) )
ctx = clues.Add(
ctx,
"item_id", itemID,
"item_name", clues.Hide(itemName),
"item_is_folder", isFolder)
if item.GetMalware() != nil { if item.GetMalware() != nil {
addtl := graph.ItemInfo(item) addtl := graph.ItemInfo(item)
skip := fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, addtl) skip := fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, addtl)
@ -847,19 +848,18 @@ func (c *Collections) processItem(
excludedItemIDs, excludedItemIDs,
invalidPrevDelta) invalidPrevDelta)
return clues.Stack(err).WithClues(ictx).OrNil() return clues.StackWC(ctx, err).OrNil()
} }
collectionPath, err := c.getCollectionPath(driveID, item) collectionPath, err := c.getCollectionPath(driveID, item)
if err != nil { if err != nil {
return clues.Stack(err). return clues.StackWC(ctx, err).
WithClues(ictx).
Label(fault.LabelForceNoBackupCreation) Label(fault.LabelForceNoBackupCreation)
} }
// Skip items that don't match the folder selectors we were given. // Skip items that don't match the folder selectors we were given.
if shouldSkip(ctx, collectionPath, c.handler, driveName) { if shouldSkip(ctx, collectionPath, c.handler, driveName) {
logger.Ctx(ictx).Debugw("path not selected", "skipped_path", collectionPath.String()) logger.Ctx(ctx).Debugw("path not selected", "skipped_path", collectionPath.String())
return nil return nil
} }
@ -872,8 +872,7 @@ func (c *Collections) processItem(
if ok { if ok {
prevPath, err = path.FromDataLayerPath(prevPathStr, false) prevPath, err = path.FromDataLayerPath(prevPathStr, false)
if err != nil { if err != nil {
return clues.Wrap(err, "invalid previous path"). return clues.WrapWC(ctx, err, "invalid previous path").
WithClues(ictx).
With("prev_path_string", path.LoggableDir(prevPathStr)) With("prev_path_string", path.LoggableDir(prevPathStr))
} }
} else if item.GetRoot() != nil { } else if item.GetRoot() != nil {
@ -892,7 +891,7 @@ func (c *Collections) processItem(
c.CollectionMap, c.CollectionMap,
collectionPath) collectionPath)
if err != nil { if err != nil {
return clues.Stack(err).WithClues(ictx) return clues.StackWC(ctx, err)
} }
if found { if found {
@ -948,7 +947,7 @@ func (c *Collections) processItem(
invalidPrevDelta || collPathAlreadyExists, invalidPrevDelta || collPathAlreadyExists,
nil) nil)
if err != nil { if err != nil {
return clues.Stack(err).WithClues(ictx) return clues.StackWC(ctx, err)
} }
col.driveName = driveName col.driveName = driveName
@ -970,16 +969,16 @@ func (c *Collections) processItem(
case item.GetFile() != nil: case item.GetFile() != nil:
// Deletions are handled above so this is just moves/renames. // Deletions are handled above so this is just moves/renames.
if len(ptr.Val(item.GetParentReference().GetId())) == 0 { if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
return clues.New("file without parent ID").WithClues(ictx) return clues.NewWC(ctx, "file without parent ID")
} }
// Get the collection for this item. // Get the collection for this item.
parentID := ptr.Val(item.GetParentReference().GetId()) parentID := ptr.Val(item.GetParentReference().GetId())
ictx = clues.Add(ictx, "parent_id", parentID) ctx = clues.Add(ctx, "parent_id", parentID)
collection, ok := c.CollectionMap[driveID][parentID] collection, ok := c.CollectionMap[driveID][parentID]
if !ok { if !ok {
return clues.New("item seen before parent folder").WithClues(ictx) return clues.NewWC(ctx, "item seen before parent folder")
} }
// This will only kick in if the file was moved multiple times // This will only kick in if the file was moved multiple times
@ -989,15 +988,13 @@ func (c *Collections) processItem(
if ok { if ok {
prevColl, found := c.CollectionMap[driveID][prevParentContainerID] prevColl, found := c.CollectionMap[driveID][prevParentContainerID]
if !found { if !found {
return clues.New("previous collection not found"). return clues.NewWC(ctx, "previous collection not found").
With("prev_parent_container_id", prevParentContainerID). With("prev_parent_container_id", prevParentContainerID)
WithClues(ictx)
} }
if ok := prevColl.Remove(itemID); !ok { if ok := prevColl.Remove(itemID); !ok {
return clues.New("removing item from prev collection"). return clues.NewWC(ctx, "removing item from prev collection").
With("prev_parent_container_id", prevParentContainerID). With("prev_parent_container_id", prevParentContainerID)
WithClues(ictx)
} }
} }
@ -1022,8 +1019,7 @@ func (c *Collections) processItem(
} }
default: default:
return clues.New("item is neither folder nor file"). return clues.NewWC(ctx, "item is neither folder nor file").
WithClues(ictx).
Label(fault.LabelForceNoBackupCreation) Label(fault.LabelForceNoBackupCreation)
} }
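`processItem` also picks up one of the ictx corrections: since the function owns its `ctx` parameter, the enrichment now reassigns `ctx` directly instead of declaring a shadowing `ictx` in the var block. A reduced sketch (the `item` type and `handle` call are hypothetical):

```go
package main

import (
	"context"

	"github.com/alcionai/clues"
)

type item struct{ id, name string }

func handle(ctx context.Context, it item) error { return nil }

func processItem(ctx context.Context, it item) error {
	// The function owns ctx, so reassignment is safe: every error built
	// below the Add picks up the item values automatically.
	ctx = clues.Add(
		ctx,
		"item_id", it.id,
		"item_name", clues.Hide(it.name))

	// OrNil keeps the happy path clean: nil in, nil out; a non-nil error
	// gains the ctx values plus a stack trace.
	return clues.StackWC(ctx, handle(ctx, it)).OrNil()
}

func main() {
	_ = processItem(context.Background(), item{id: "i1", name: "doc"})
}
```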

View File

@ -124,11 +124,11 @@ func getItemName(
meta, err := FetchAndReadMetadata(ctx, fin, metaName) meta, err := FetchAndReadMetadata(ctx, fin, metaName)
if err != nil { if err != nil {
return "", clues.Wrap(err, "getting metadata").WithClues(ctx) return "", clues.WrapWC(ctx, err, "getting metadata")
} }
return meta.FileName, nil return meta.FileName, nil
} }
return "", clues.New("invalid item id").WithClues(ctx) return "", clues.NewWC(ctx, "invalid item id")
} }

View File

@ -126,7 +126,7 @@ func downloadFile(
url string, url string,
) (io.ReadCloser, error) { ) (io.ReadCloser, error) {
if len(url) == 0 { if len(url) == 0 {
return nil, clues.New("empty file url").WithClues(ctx) return nil, clues.NewWC(ctx, "empty file url")
} }
// Precheck for url expiry before we make a call to graph to download the // Precheck for url expiry before we make a call to graph to download the
@ -178,7 +178,7 @@ func downloadItemMeta(
metaJSON, err := json.Marshal(meta) metaJSON, err := json.Marshal(meta)
if err != nil { if err != nil {
return nil, 0, clues.Wrap(err, "serializing item metadata").WithClues(ctx) return nil, 0, clues.WrapWC(ctx, err, "serializing item metadata")
} }
return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil
@ -231,14 +231,14 @@ func isURLExpired(
if err != nil { if err != nil {
logger.CtxErr(ctx, err).Info("query param not found") logger.CtxErr(ctx, err).Info("query param not found")
return false, clues.Stack(err).WithClues(ctx) return false, clues.StackWC(ctx, err)
} }
expired, err := jwt.IsJWTExpired(rawJWT) expired, err := jwt.IsJWTExpired(rawJWT)
if err != nil { if err != nil {
logger.CtxErr(ctx, err).Info("checking jwt expiry") logger.CtxErr(ctx, err).Info("checking jwt expiry")
return false, clues.Stack(err).WithClues(ctx) return false, clues.StackWC(ctx, err)
} }
return expired, nil return expired, nil

View File

@ -98,7 +98,7 @@ func computePreviousLinkShares(
parent, err := originDir.Dir() parent, err := originDir.Dir()
if err != nil { if err != nil {
return nil, clues.New("getting parent").WithClues(ctx) return nil, clues.NewWC(ctx, "getting parent")
} }
for len(parent.Elements()) > 0 { for len(parent.Elements()) > 0 {
@ -106,7 +106,7 @@ func computePreviousLinkShares(
drivePath, err := path.ToDrivePath(parent) drivePath, err := path.ToDrivePath(parent)
if err != nil { if err != nil {
return nil, clues.New("transforming dir to drivePath").WithClues(ictx) return nil, clues.NewWC(ictx, "transforming dir to drivePath")
} }
if len(drivePath.Folders) == 0 { if len(drivePath.Folders) == 0 {
@ -115,7 +115,7 @@ func computePreviousLinkShares(
meta, ok := parentMetas.Load(parent.String()) meta, ok := parentMetas.Load(parent.String())
if !ok { if !ok {
return nil, clues.New("no metadata found in parent").WithClues(ictx) return nil, clues.NewWC(ictx, "no metadata found in parent")
} }
// Any change in permissions would change it to custom // Any change in permissions would change it to custom
@ -126,7 +126,7 @@ func computePreviousLinkShares(
parent, err = parent.Dir() parent, err = parent.Dir()
if err != nil { if err != nil {
return nil, clues.New("getting parent").WithClues(ctx) return nil, clues.NewWC(ictx, "getting parent")
} }
} }
@ -156,14 +156,14 @@ func computePreviousMetadata(
for { for {
parent, err = parent.Dir() parent, err = parent.Dir()
if err != nil { if err != nil {
return metadata.Metadata{}, clues.New("getting parent").WithClues(ctx) return metadata.Metadata{}, clues.NewWC(ctx, "getting parent")
} }
ictx := clues.Add(ctx, "parent_dir", parent) ictx := clues.Add(ctx, "parent_dir", parent)
drivePath, err := path.ToDrivePath(parent) drivePath, err := path.ToDrivePath(parent)
if err != nil { if err != nil {
return metadata.Metadata{}, clues.New("transforming dir to drivePath").WithClues(ictx) return metadata.Metadata{}, clues.NewWC(ictx, "transforming dir to drivePath")
} }
if len(drivePath.Folders) == 0 { if len(drivePath.Folders) == 0 {
@ -172,7 +172,7 @@ func computePreviousMetadata(
meta, ok = parentMetas.Load(parent.String()) meta, ok = parentMetas.Load(parent.String())
if !ok { if !ok {
return metadata.Metadata{}, clues.New("no metadata found for parent folder: " + parent.String()).WithClues(ictx) return metadata.Metadata{}, clues.NewWC(ictx, "no metadata found for parent folder: "+parent.String())
} }
if meta.SharingMode == metadata.SharingModeCustom { if meta.SharingMode == metadata.SharingModeCustom {
@ -214,7 +214,7 @@ func UpdatePermissions(
pid, ok := oldPermIDToNewID.Load(p.ID) pid, ok := oldPermIDToNewID.Load(p.ID)
if !ok { if !ok {
return clues.New("no new permission id").WithClues(ctx) return clues.NewWC(ictx, "no new permission id")
} }
err := udip.DeleteItemPermission( err := udip.DeleteItemPermission(

View File

@ -69,7 +69,7 @@ func RestoreCollection(
drivePath, err := path.ToDrivePath(directory) drivePath, err := path.ToDrivePath(directory)
if err != nil { if err != nil {
return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx) return metrics, clues.WrapWC(ctx, err, "creating drive path")
} }
di, err := ensureDriveExists( di, err := ensureDriveExists(
@ -118,7 +118,7 @@ func RestoreCollection(
rcc.BackupVersion, rcc.BackupVersion,
rcc.RestoreConfig.IncludePermissions) rcc.RestoreConfig.IncludePermissions)
if err != nil { if err != nil {
return metrics, clues.Wrap(err, "getting permissions").WithClues(ctx) return metrics, clues.Wrap(err, "getting permissions")
} }
// Create restore folders and get the folder ID of the folder the data stream will be restored in // Create restore folders and get the folder ID of the folder the data stream will be restored in
@ -193,16 +193,16 @@ func RestoreCollection(
defer caches.pool.Put(copyBufferPtr) defer caches.pool.Put(copyBufferPtr)
copyBuffer := *copyBufferPtr copyBuffer := *copyBufferPtr
ictx := clues.Add(ctx, "restore_item_id", itemData.ID()) ctx = clues.Add(ctx, "restore_item_id", itemData.ID())
itemPath, err := dc.FullPath().AppendItem(itemData.ID()) itemPath, err := dc.FullPath().AppendItem(itemData.ID())
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ictx)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "appending item to full path"))
return return
} }
itemInfo, skipped, err := restoreItem( itemInfo, skipped, err := restoreItem(
ictx, ctx,
rh, rh,
rcc, rcc,
dc, dc,
@ -227,12 +227,12 @@ func RestoreCollection(
} }
if skipped { if skipped {
logger.Ctx(ictx).With("item_path", itemPath).Debug("did not restore item") logger.Ctx(ctx).With("item_path", itemPath).Debug("did not restore item")
return return
} }
// TODO: implement locationRef // TODO: implement locationRef
updateDeets(ictx, itemPath, &path.Builder{}, itemInfo) updateDeets(ctx, itemPath, &path.Builder{}, itemInfo)
atomic.AddInt64(&metricsSuccess, 1) atomic.AddInt64(&metricsSuccess, 1)
}(ctx, itemData) }(ctx, itemData)
@ -312,7 +312,7 @@ func restoreItem(
meta, err := getMetadata(metaReader) meta, err := getMetadata(metaReader)
if err != nil { if err != nil {
return details.ItemInfo{}, true, clues.Wrap(err, "getting directory metadata").WithClues(ctx) return details.ItemInfo{}, true, clues.WrapWC(ctx, err, "getting directory metadata")
} }
trimmedPath := strings.TrimSuffix(itemPath.String(), metadata.DirMetaFileSuffix) trimmedPath := strings.TrimSuffix(itemPath.String(), metadata.DirMetaFileSuffix)
@ -729,7 +729,7 @@ func restoreFile(
// Get the stream size (needed to create the upload session) // Get the stream size (needed to create the upload session)
ss, ok := itemData.(data.ItemSize) ss, ok := itemData.(data.ItemSize)
if !ok { if !ok {
return "", details.ItemInfo{}, clues.New("item does not implement DataStreamInfo").WithClues(ctx) return "", details.ItemInfo{}, clues.NewWC(ctx, "item does not implement DataStreamInfo")
} }
var ( var (

View File

@ -201,7 +201,7 @@ func (uc *urlCache) readCache(
props, ok := uc.idToProps[itemID] props, ok := uc.idToProps[itemID]
if !ok { if !ok {
return itemProps{}, clues.New("item not found in cache").WithClues(ctx) return itemProps{}, clues.NewWC(ctx, "item not found in cache")
} }
return props, nil return props, nil

View File

@ -99,7 +99,7 @@ func uploadAttachment(
// Max attachment size is 150MB. // Max attachment size is 150MB.
content, err := api.GetAttachmentContent(attachment) content, err := api.GetAttachmentContent(attachment)
if err != nil { if err != nil {
return clues.Wrap(err, "serializing attachment content").WithClues(ctx) return clues.WrapWC(ctx, err, "serializing attachment content")
} }
_, err = ap.PostLargeAttachment(ctx, userID, containerID, parentItemID, name, content) _, err = ap.PostLargeAttachment(ctx, userID, containerID, parentItemID, name, content)

View File

@ -46,7 +46,7 @@ func CreateCollections(
handler, ok := handlers[category] handler, ok := handlers[category]
if !ok { if !ok {
return nil, clues.New("unsupported backup category type").WithClues(ctx) return nil, clues.NewWC(ctx, "unsupported backup category type")
} }
foldersComplete := observe.MessageWithCompletion( foldersComplete := observe.MessageWithCompletion(
@ -233,7 +233,7 @@ func populateCollections(
) )
if collections[id] != nil { if collections[id] != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx)) el.AddRecoverable(ctx, clues.WrapWC(ictx, err, "conflict: tombstone exists for a live collection"))
continue continue
} }

View File

@ -76,14 +76,13 @@ func getItemAndInfo(
useImmutableIDs, useImmutableIDs,
fault.New(true)) // temporary way to force a failFast error fault.New(true)) // temporary way to force a failFast error
if err != nil { if err != nil {
return nil, nil, clues.Wrap(err, "fetching item"). return nil, nil, clues.WrapWC(ctx, err, "fetching item").
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation) Label(fault.LabelForceNoBackupCreation)
} }
itemData, err := getter.Serialize(ctx, item, userID, id) itemData, err := getter.Serialize(ctx, item, userID, id)
if err != nil { if err != nil {
return nil, nil, clues.Wrap(err, "serializing item").WithClues(ctx) return nil, nil, clues.WrapWC(ctx, err, "serializing item")
} }
// In case of mail the size of itemData is calc as- size of body content+size of attachment // In case of mail the size of itemData is calc as- size of body content+size of attachment
@ -285,8 +284,7 @@ func (col *prefetchCollection) streamItems(
if err != nil { if err != nil {
el.AddRecoverable( el.AddRecoverable(
ctx, ctx,
clues.Stack(err). clues.StackWC(ctx, err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation)) Label(fault.LabelForceNoBackupCreation))
return return

View File

@ -49,7 +49,7 @@ func (cfc *contactContainerCache) init(
baseContainerPath []string, baseContainerPath []string,
) error { ) error {
if len(baseNode) == 0 { if len(baseNode) == 0 {
return clues.New("m365 folderID required for base contact folder").WithClues(ctx) return clues.NewWC(ctx, "m365 folderID required for base contact folder")
} }
if cfc.containerResolver == nil { if cfc.containerResolver == nil {
@ -77,7 +77,7 @@ func (cfc *contactContainerCache) populateContactRoot(
path.Builder{}.Append(ptr.Val(f.GetId())), // path of IDs path.Builder{}.Append(ptr.Val(f.GetId())), // path of IDs
path.Builder{}.Append(baseContainerPath...)) // display location path.Builder{}.Append(baseContainerPath...)) // display location
if err := cfc.addFolder(&temp); err != nil { if err := cfc.addFolder(&temp); err != nil {
return clues.Wrap(err, "adding resolver dir").WithClues(ctx) return clues.WrapWC(ctx, err, "adding resolver dir")
} }
return nil return nil

View File

@ -68,12 +68,12 @@ func (cr *containerResolver) IDToPath(
c, ok := cr.cache[folderID] c, ok := cr.cache[folderID]
if !ok { if !ok {
return nil, nil, clues.New("container not cached").WithClues(ctx) return nil, nil, clues.NewWC(ctx, "container not cached")
} }
p := c.Path() p := c.Path()
if p == nil { if p == nil {
return nil, nil, clues.New("cached container has no path").WithClues(ctx) return nil, nil, clues.NewWC(ctx, "cached container has no path")
} }
return p, c.Location(), nil return p, c.Location(), nil
@ -91,7 +91,7 @@ func (cr *containerResolver) refreshContainer(
logger.Ctx(ctx).Debug("refreshing container") logger.Ctx(ctx).Debug("refreshing container")
if cr.refresher == nil { if cr.refresher == nil {
return nil, false, clues.New("nil refresher").WithClues(ctx) return nil, false, clues.NewWC(ctx, "nil refresher")
} }
c, err := cr.refresher.refreshContainer(ctx, id) c, err := cr.refresher.refreshContainer(ctx, id)
@ -100,7 +100,7 @@ func (cr *containerResolver) refreshContainer(
return nil, true, nil return nil, true, nil
} else if err != nil { } else if err != nil {
// This is some other error, just return it. // This is some other error, just return it.
return nil, false, clues.Wrap(err, "refreshing container").WithClues(ctx) return nil, false, clues.WrapWC(ctx, err, "refreshing container")
} }
return c, false, nil return c, false, nil
@ -131,7 +131,7 @@ func (cr *containerResolver) recoverContainer(
} }
if err := cr.addFolder(c); err != nil { if err := cr.addFolder(c); err != nil {
return nil, nil, false, clues.Wrap(err, "adding new container").WithClues(ctx) return nil, nil, false, clues.WrapWC(ctx, err, "adding new container")
} }
// Retry populating this container's paths. // Retry populating this container's paths.
@ -162,11 +162,12 @@ func (cr *containerResolver) idToPath(
if depth >= maxIterations { if depth >= maxIterations {
return resolvedPath{ return resolvedPath{
idPath: nil, idPath: nil,
locPath: nil, locPath: nil,
cached: false, cached: false,
deleted: false, deleted: false,
}, clues.New("path contains cycle or is too tall").WithClues(ctx) },
clues.NewWC(ctx, "path contains cycle or is too tall")
} }
c, ok := cr.cache[folderID] c, ok := cr.cache[folderID]
@ -217,7 +218,7 @@ func (cr *containerResolver) idToPath(
locPath: nil, locPath: nil,
cached: true, cached: true,
deleted: false, deleted: false,
}, clues.Wrap(err, "refreshing container").WithClues(ctx) }, clues.WrapWC(ctx, err, "refreshing container")
} }
if shouldDelete { if shouldDelete {
@ -249,7 +250,7 @@ func (cr *containerResolver) idToPath(
locPath: nil, locPath: nil,
cached: false, cached: false,
deleted: false, deleted: false,
}, clues.Wrap(err, "updating cached container").WithClues(ctx) }, clues.WrapWC(ctx, err, "updating cached container")
} }
return cr.idToPath(ctx, folderID, depth) return cr.idToPath(ctx, folderID, depth)
@ -378,7 +379,7 @@ func (cr *containerResolver) AddToCache(
Container: f, Container: f,
} }
if err := cr.addFolder(temp); err != nil { if err := cr.addFolder(temp); err != nil {
return clues.Wrap(err, "adding cache folder").WithClues(ctx) return clues.WrapWC(ctx, err, "adding cache folder")
} }
// Populate the path for this entry so calls to PathInCache succeed no matter // Populate the path for this entry so calls to PathInCache succeed no matter
@ -475,13 +476,12 @@ func newRankedContainerResolver(
c, err := getter.GetContainerByID(ctx, userID, id) c, err := getter.GetContainerByID(ctx, userID, id)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "getting ranked container").WithClues(ictx) return nil, clues.WrapWC(ictx, err, "getting ranked container")
} }
gotID := ptr.Val(c.GetId()) gotID := ptr.Val(c.GetId())
if len(gotID) == 0 { if len(gotID) == 0 {
return nil, clues.New("ranked include container missing ID"). return nil, clues.NewWC(ictx, "ranked include container missing ID")
WithClues(ictx)
} }
cr.resolvedInclude = append(cr.resolvedInclude, gotID) cr.resolvedInclude = append(cr.resolvedInclude, gotID)
@ -492,13 +492,12 @@ func newRankedContainerResolver(
c, err := getter.GetContainerByID(ctx, userID, id) c, err := getter.GetContainerByID(ctx, userID, id)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "getting exclude container").WithClues(ictx) return nil, clues.WrapWC(ictx, err, "getting exclude container")
} }
gotID := ptr.Val(c.GetId()) gotID := ptr.Val(c.GetId())
if len(gotID) == 0 { if len(gotID) == 0 {
return nil, clues.New("exclude container missing ID"). return nil, clues.NewWC(ictx, "exclude container missing ID")
WithClues(ictx)
} }
cr.resolvedExclude[gotID] = struct{}{} cr.resolvedExclude[gotID] = struct{}{}

View File

@ -52,7 +52,7 @@ func (ecc *eventContainerCache) populateEventRoot(ctx context.Context) error {
path.Builder{}.Append(ptr.Val(f.GetId())), // storage path path.Builder{}.Append(ptr.Val(f.GetId())), // storage path
path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location
if err := ecc.addFolder(&temp); err != nil { if err := ecc.addFolder(&temp); err != nil {
return clues.Wrap(err, "initializing calendar resolver").WithClues(ctx) return clues.WrapWC(ctx, err, "initializing calendar resolver")
} }
return nil return nil
@ -111,7 +111,7 @@ func (ecc *eventContainerCache) Populate(
// @returns error iff the required values are not accessible. // @returns error iff the required values are not accessible.
func (ecc *eventContainerCache) AddToCache(ctx context.Context, f graph.Container) error { func (ecc *eventContainerCache) AddToCache(ctx context.Context, f graph.Container) error {
if err := checkIDAndName(f); err != nil { if err := checkIDAndName(f); err != nil {
return clues.Wrap(err, "validating container").WithClues(ctx) return clues.WrapWC(ctx, err, "validating container")
} }
temp := graph.NewCacheFolder( temp := graph.NewCacheFolder(
@ -120,7 +120,7 @@ func (ecc *eventContainerCache) AddToCache(ctx context.Context, f graph.Containe
path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location
if err := ecc.addFolder(&temp); err != nil { if err := ecc.addFolder(&temp); err != nil {
return clues.Wrap(err, "adding container").WithClues(ctx) return clues.WrapWC(ctx, err, "adding container")
} }
// Populate the path for this entry so calls to PathInCache succeed no matter // Populate the path for this entry so calls to PathInCache succeed no matter

View File

@ -107,7 +107,7 @@ func restoreEvent(
) (*details.ExchangeInfo, error) { ) (*details.ExchangeInfo, error) {
event, err := api.BytesToEventable(body) event, err := api.BytesToEventable(body)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "creating event from bytes").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "creating event from bytes")
} }
ctx = clues.Add(ctx, "item_id", ptr.Val(event.GetId())) ctx = clues.Add(ctx, "item_id", ptr.Val(event.GetId()))
@ -176,7 +176,7 @@ func restoreEvent(
// removed cancelled and exceptions events from it // removed cancelled and exceptions events from it
event, err = api.BytesToEventable(body) event, err = api.BytesToEventable(body)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "creating event from bytes").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "creating event from bytes")
} }
// Fix up event instances in case we have a recurring event // Fix up event instances in case we have a recurring event
@ -276,8 +276,7 @@ func updateAttachments(
err = agdp.DeleteAttachment(ctx, userID, containerID, eventID, id) err = agdp.DeleteAttachment(ctx, userID, containerID, eventID, id)
if err != nil { if err != nil {
logger.CtxErr(ctx, err).With("attachment_name", name).Info("attachment delete failed") logger.CtxErr(ctx, err).With("attachment_name", name).Info("attachment delete failed")
el.AddRecoverable(ctx, clues.Wrap(err, "deleting event attachment"). el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "deleting event attachment").With("attachment_name", name))
WithClues(ctx).With("attachment_name", name))
} }
} }
} }
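The attachment-deletion hunk pairs a ctx-aware log with a recoverable error, and shows that `.With(...)` chains off `WrapWC` exactly as it did off `Wrap`. Approximately, with a hypothetical `del` call:

```go
package main

import (
	"context"

	"github.com/alcionai/clues"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/logger"
)

func del(ctx context.Context, id string) error { return nil }

func deleteAttachments(ctx context.Context, names map[string]string, errs *fault.Bus) {
	el := errs.Local()

	for id, name := range names {
		if err := del(ctx, id); err != nil {
			// Log for operators, record for the fault report; both carry ctx.
			logger.CtxErr(ctx, err).With("attachment_name", name).Info("attachment delete failed")
			el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "deleting event attachment").
				With("attachment_name", name))
		}
	}
}

func main() {
	deleteAttachments(context.Background(), map[string]string{"id-1": "report.pdf"}, fault.New(false))
}
```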

View File

@ -80,7 +80,7 @@ func (mc *mailContainerCache) populateMailRoot(ctx context.Context) error {
path.Builder{}.Append(), // path of IDs path.Builder{}.Append(), // path of IDs
path.Builder{}.Append()) // display location path.Builder{}.Append()) // display location
if err := mc.addFolder(&temp); err != nil { if err := mc.addFolder(&temp); err != nil {
return clues.Wrap(err, "adding resolver dir").WithClues(ctx) return clues.WrapWC(ctx, err, "adding resolver dir")
} }
return nil return nil

View File

@ -107,7 +107,7 @@ func restoreMail(
) (*details.ExchangeInfo, error) { ) (*details.ExchangeInfo, error) {
msg, err := api.BytesToMessageable(body) msg, err := api.BytesToMessageable(body)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "creating mail from bytes").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "creating mail from bytes")
} }
ctx = clues.Add(ctx, "item_id", ptr.Val(msg.GetId())) ctx = clues.Add(ctx, "item_id", ptr.Val(msg.GetId()))

View File

@ -48,7 +48,7 @@ func ParseMetadataCollections(
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx) return nil, false, clues.WrapWC(ctx, ctx.Err(), "parsing collection metadata")
case item, ok := <-items: case item, ok := <-items:
if !ok || errs.Failure() != nil { if !ok || errs.Failure() != nil {
@ -63,13 +63,13 @@ func ParseMetadataCollections(
err := json.NewDecoder(item.ToReader()).Decode(&m) err := json.NewDecoder(item.ToReader()).Decode(&m)
if err != nil { if err != nil {
return nil, false, clues.New("decoding metadata json").WithClues(ctx) return nil, false, clues.NewWC(ctx, "decoding metadata json")
} }
switch item.ID() { switch item.ID() {
case metadata.PreviousPathFileName: case metadata.PreviousPathFileName:
if _, ok := found[category][metadata.PathKey]; ok { if _, ok := found[category][metadata.PathKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx) return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of path metadata")
} }
for k, p := range m { for k, p := range m {
@ -80,7 +80,7 @@ func ParseMetadataCollections(
case metadata.DeltaURLsFileName: case metadata.DeltaURLsFileName:
if _, ok := found[category][metadata.DeltaKey]; ok { if _, ok := found[category][metadata.DeltaKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx) return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of delta metadata")
} }
for k, d := range m { for k, d := range m {

View File

@ -54,7 +54,7 @@ func RestoreCollection(
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return metrics, clues.Wrap(ctx.Err(), "context cancelled").WithClues(ctx) return metrics, clues.WrapWC(ctx, ctx.Err(), "context cancelled")
case itemData, ok := <-items: case itemData, ok := <-items:
if !ok || el.Failure() != nil { if !ok || el.Failure() != nil {
@ -69,7 +69,7 @@ func RestoreCollection(
_, err := buf.ReadFrom(itemData.ToReader()) _, err := buf.ReadFrom(itemData.ToReader())
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "reading item bytes").WithClues(ictx)) el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "reading item bytes"))
continue continue
} }
@ -99,7 +99,7 @@ func RestoreCollection(
// destination folder, then the restore path no longer matches the fullPath. // destination folder, then the restore path no longer matches the fullPath.
itemPath, err := fullPath.AppendItem(itemData.ID()) itemPath, err := fullPath.AppendItem(itemData.ID())
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "adding item to collection path").WithClues(ctx)) el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "adding item to collection path"))
continue continue
} }
@ -114,7 +114,7 @@ func RestoreCollection(
if err != nil { if err != nil {
// These deets additions are for cli display purposes only. // These deets additions are for cli display purposes only.
// no need to fail out on error. // no need to fail out on error.
logger.Ctx(ctx).Infow("accounting for restored item", "error", err) logger.Ctx(ictx).Infow("accounting for restored item", "error", err)
} }
colProgress <- struct{}{} colProgress <- struct{}{}
@ -247,7 +247,7 @@ func uploadAttachments(
continue continue
} }
el.AddRecoverable(ctx, clues.Wrap(err, "uploading mail attachment").WithClues(ctx)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "uploading mail attachment"))
} }
} }
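Each restore loop guards on cancellation before pulling the next item; wrapping `ctx.Err()` with the same ctx means the cancellation error reports whatever collection values were added upstream. A reduced sketch with a hypothetical items channel:

```go
package main

import (
	"context"

	"github.com/alcionai/clues"
)

func drain(ctx context.Context, items <-chan string) error {
	for {
		select {
		case <-ctx.Done():
			// The error carries every value clues.Add-ed upstream.
			return clues.WrapWC(ctx, ctx.Err(), "context cancelled")

		case it, ok := <-items:
			if !ok {
				return nil
			}

			_ = it // restore the item here
		}
	}
}

func main() {
	ch := make(chan string)
	close(ch)

	_ = drain(clues.Add(context.Background(), "collection", "inbox"), ch)
}
```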

View File

@ -219,7 +219,7 @@ func populateCollections(
) )
if collections[id] != nil { if collections[id] != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx)) el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "conflict: tombstone exists for a live collection"))
continue continue
} }

View File

@ -183,9 +183,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
if err != nil { if err != nil {
el.AddRecoverable( el.AddRecoverable(
ctx, ctx,
clues.Stack(err). clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation))
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation))
return return
} }

View File

@ -44,7 +44,7 @@ func parseMetadataCollections(
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx) return nil, false, clues.WrapWC(ctx, ctx.Err(), "parsing collection metadata")
case item, ok := <-items: case item, ok := <-items:
if !ok || errs.Failure() != nil { if !ok || errs.Failure() != nil {
@ -64,13 +64,13 @@ func parseMetadataCollections(
err := json.NewDecoder(item.ToReader()).Decode(&m) err := json.NewDecoder(item.ToReader()).Decode(&m)
if err != nil { if err != nil {
return nil, false, clues.New("decoding metadata json").WithClues(ctx) return nil, false, clues.NewWC(ctx, "decoding metadata json")
} }
switch item.ID() { switch item.ID() {
case metadata.PreviousPathFileName: case metadata.PreviousPathFileName:
if _, ok := found[category][metadata.PathKey]; ok { if _, ok := found[category][metadata.PathKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx) return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of path metadata")
} }
for k, p := range m { for k, p := range m {
@ -81,7 +81,7 @@ func parseMetadataCollections(
case metadata.DeltaURLsFileName: case metadata.DeltaURLsFileName:
if _, ok := found[category][metadata.DeltaKey]; ok { if _, ok := found[category][metadata.DeltaKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx) return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of delta metadata")
} }
for k, d := range m { for k, d := range m {

View File

@ -102,7 +102,7 @@ func CollectPages(
false, false,
tuple.Name) tuple.Name)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "creating page collection path").WithClues(ctx)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "creating page collection path"))
} }
collection := NewCollection( collection := NewCollection(
@ -154,7 +154,7 @@ func CollectLists(
false, false,
tuple.Name) tuple.Name)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "creating list collection path").WithClues(ctx)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "creating list collection path"))
} }
collection := NewCollection( collection := NewCollection(

View File

@ -201,7 +201,7 @@ func (sc *Collection) retrieveLists(
byteArray, err := serializeContent(ctx, wtr, lst) byteArray, err := serializeContent(ctx, wtr, lst)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "serializing list").Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -217,7 +217,7 @@ func (sc *Collection) retrieveLists(
ptr.Val(lst.GetId()), ptr.Val(lst.GetId()),
details.ItemInfo{SharePoint: ListToSPInfo(lst, size)}) details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -243,7 +243,7 @@ func (sc *Collection) retrievePages(
betaService := sc.betaService betaService := sc.betaService
if betaService == nil { if betaService == nil {
return metrics, clues.New("beta service required").WithClues(ctx) return metrics, clues.NewWC(ctx, "beta service required")
} }
parent, err := as.GetByID(ctx, sc.fullPath.ProtectedResource(), api.CallConfig{}) parent, err := as.GetByID(ctx, sc.fullPath.ProtectedResource(), api.CallConfig{})
@ -269,7 +269,7 @@ func (sc *Collection) retrievePages(
byteArray, err := serializeContent(ctx, wtr, pg) byteArray, err := serializeContent(ctx, wtr, pg)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "serializing page").Label(fault.LabelForceNoBackupCreation))
continue continue
} }
@ -284,7 +284,7 @@ func (sc *Collection) retrievePages(
ptr.Val(pg.GetId()), ptr.Val(pg.GetId()),
details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)}) details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) el.AddRecoverable(ctx, clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation))
continue continue
} }

View File

@ -151,12 +151,12 @@ func restoreListItem(
byteArray, err := io.ReadAll(itemData.ToReader()) byteArray, err := io.ReadAll(itemData.ToReader())
if err != nil { if err != nil {
return dii, clues.Wrap(err, "reading backup data").WithClues(ctx) return dii, clues.WrapWC(ctx, err, "reading backup data")
} }
oldList, err := betaAPI.CreateListFromBytes(byteArray) oldList, err := betaAPI.CreateListFromBytes(byteArray)
if err != nil { if err != nil {
return dii, clues.Wrap(err, "creating item").WithClues(ctx) return dii, clues.WrapWC(ctx, err, "creating item")
} }
if name, ok := ptr.ValOK(oldList.GetDisplayName()); ok { if name, ok := ptr.ValOK(oldList.GetDisplayName()); ok {
@ -233,7 +233,7 @@ func RestoreListCollection(
select { select {
case <-ctx.Done(): case <-ctx.Done():
return metrics, clues.Stack(ctx.Err()).WithClues(ctx) return metrics, clues.StackWC(ctx, ctx.Err())
case itemData, ok := <-items: case itemData, ok := <-items:
if !ok { if !ok {
@ -256,7 +256,7 @@ func RestoreListCollection(
itemPath, err := dc.FullPath().AppendItem(itemData.ID()) itemPath, err := dc.FullPath().AppendItem(itemData.ID())
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "appending item to full path"))
continue continue
} }
@ -312,7 +312,7 @@ func RestorePageCollection(
select { select {
case <-ctx.Done(): case <-ctx.Done():
return metrics, clues.Stack(ctx.Err()).WithClues(ctx) return metrics, clues.StackWC(ctx, ctx.Err())
case itemData, ok := <-items: case itemData, ok := <-items:
if !ok { if !ok {
@ -335,7 +335,7 @@ func RestorePageCollection(
itemPath, err := dc.FullPath().AppendItem(itemData.ID()) itemPath, err := dc.FullPath().AppendItem(itemData.ID())
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx)) el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "appending item to full path"))
continue continue
} }

View File

@ -75,12 +75,12 @@ func NewController(
creds, err := acct.M365Config() creds, err := acct.M365Config()
if err != nil { if err != nil {
return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "retrieving m365 account configuration")
} }
ac, err := api.NewClient(creds, co, counter) ac, err := api.NewClient(creds, co, counter)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "creating api client").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "creating api client")
} }
ctrl := Controller{ ctrl := Controller{
@ -287,7 +287,7 @@ func (ctrl *Controller) PopulateProtectedResourceIDAndName(
ins idname.Cacher, ins idname.Cacher,
) (idname.Provider, error) { ) (idname.Provider, error) {
if ctrl.resourceHandler == nil { if ctrl.resourceHandler == nil {
return nil, clues.Stack(ErrNoResourceLookup).WithClues(ctx) return nil, clues.StackWC(ctx, ErrNoResourceLookup)
} }
pr, err := ctrl.resourceHandler.GetResourceIDAndNameFrom(ctx, resourceID, ins) pr, err := ctrl.resourceHandler.GetResourceIDAndNameFrom(ctx, resourceID, ins)

View File

@ -34,6 +34,6 @@ func (ctrl *Controller) DeserializeMetadataFiles(
case path.GroupsService, path.GroupsMetadataService: case path.GroupsService, path.GroupsMetadataService:
return groups.DeserializeMetadataFiles(ctx, colls) return groups.DeserializeMetadataFiles(ctx, colls)
default: default:
return nil, clues.New("unrecognized service").With("service", service).WithClues(ctx) return nil, clues.NewWC(ctx, "unrecognized service").With("service", service)
} }
} }

View File

@ -29,7 +29,7 @@ func ProduceBackupCollections(
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) { ) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
eb, err := bpc.Selector.ToExchangeBackup() eb, err := bpc.Selector.ToExchangeBackup()
if err != nil { if err != nil {
return nil, nil, false, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx) return nil, nil, false, clues.WrapWC(ctx, err, "exchange dataCollection selector")
} }
var ( var (

View File

@ -55,13 +55,13 @@ func ConsumeRestoreCollections(
handler, ok := handlers[category] handler, ok := handlers[category]
if !ok { if !ok {
el.AddRecoverable(ctx, clues.New("unsupported restore path category").WithClues(ictx)) el.AddRecoverable(ictx, clues.NewWC(ictx, "unsupported restore path category"))
continue continue
} }
if directoryCache[category] == nil { if directoryCache[category] == nil {
gcr := handler.NewContainerCache(resourceID) gcr := handler.NewContainerCache(resourceID)
if err := gcr.Populate(ctx, errs, handler.DefaultRootContainer()); err != nil { if err := gcr.Populate(ictx, errs, handler.DefaultRootContainer()); err != nil {
return nil, clues.Wrap(err, "populating container cache") return nil, clues.Wrap(err, "populating container cache")
} }
@ -76,16 +76,16 @@ func ConsumeRestoreCollections(
directoryCache[category], directoryCache[category],
errs) errs)
if err != nil { if err != nil {
el.AddRecoverable(ctx, err) el.AddRecoverable(ictx, err)
continue continue
} }
directoryCache[category] = gcc directoryCache[category] = gcc
ictx = clues.Add(ictx, "restore_destination_id", containerID) ictx = clues.Add(ictx, "restore_destination_id", containerID)
collisionKeyToItemID, err := handler.GetItemsInContainerByCollisionKey(ctx, resourceID, containerID) collisionKeyToItemID, err := handler.GetItemsInContainerByCollisionKey(ictx, resourceID, containerID)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "building item collision cache")) el.AddRecoverable(ictx, clues.Wrap(err, "building item collision cache"))
continue continue
} }
@ -108,7 +108,7 @@ func ConsumeRestoreCollections(
break break
} }
el.AddRecoverable(ctx, err) el.AddRecoverable(ictx, err)
} }
} }
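This file shows the inverse of the `processItem` fix: inside the per-category loop the body previously mixed `ctx` and `ictx`, so some recoverable errors missed the category and destination values; now everything in the loop consistently takes the iteration-scoped `ictx`. The intended shape, with hypothetical restore plumbing:

```go
package main

import (
	"context"

	"github.com/alcionai/clues"
	"github.com/alcionai/corso/src/pkg/fault"
)

func restoreCategory(ctx context.Context, category string) error { return nil }

func restoreAll(ctx context.Context, categories []string, errs *fault.Bus) {
	el := errs.Local()

	for _, category := range categories {
		// Derive ictx once per iteration and use it for every call in the
		// body, so each recorded error names the category it came from.
		ictx := clues.Add(ctx, "category", category)

		if err := restoreCategory(ictx, category); err != nil {
			el.AddRecoverable(ictx, clues.Wrap(err, "restoring category"))
			continue
		}
	}
}

func main() {
	restoreAll(context.Background(), []string{"email", "contacts"}, fault.New(false))
}
```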

View File

@ -60,7 +60,7 @@ func ProduceBackupCollections(
bpc.ProtectedResource.ID(), bpc.ProtectedResource.ID(),
api.CallConfig{}) api.CallConfig{})
if err != nil { if err != nil {
return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx) return nil, nil, clues.WrapWC(ctx, err, "getting group")
} }
isTeam := api.IsTeam(ctx, group) isTeam := api.IsTeam(ctx, group)
@ -307,9 +307,7 @@ func deserializeSiteMetadata(
for breakLoop := false; !breakLoop; { for breakLoop := false; !breakLoop; {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, clues.Wrap( return nil, clues.WrapWC(ctx, ctx.Err(), "deserializing previous sites metadata")
ctx.Err(),
"deserializing previous sites metadata").WithClues(ctx)
case item, ok := <-items: case item, ok := <-items:
if !ok { if !ok {
@ -340,7 +338,7 @@ func deserializeSiteMetadata(
} }
if err != nil { if err != nil {
return nil, clues.Stack(err).WithClues(ictx) return nil, clues.StackWC(ictx, err)
} }
} }
} }

View File

@ -17,7 +17,7 @@ func IsServiceEnabled(
) (bool, error) { ) (bool, error) {
resp, err := gbi.GetByID(ctx, resource, api.CallConfig{}) resp, err := gbi.GetByID(ctx, resource, api.CallConfig{})
if err != nil { if err != nil {
return false, clues.Wrap(err, "getting group").WithClues(ctx) return false, clues.WrapWC(ctx, err, "getting group")
} }
// according to graph api docs: https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0 // according to graph api docs: https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0

View File

@ -84,7 +84,7 @@ func (h *baseGroupsHandler) ProduceExportCollections(
case path.LibrariesCategory: case path.LibrariesCategory:
drivePath, err := path.ToDrivePath(restoreColl.FullPath()) drivePath, err := path.ToDrivePath(restoreColl.FullPath())
if err != nil { if err != nil {
return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "transforming path to drive path")
} }
driveName, ok := h.backupDriveIDNames.NameOf(drivePath.DriveID) driveName, ok := h.backupDriveIDNames.NameOf(drivePath.DriveID)

View File

@ -73,12 +73,12 @@ func ConsumeRestoreCollections(
webURL, ok := backupSiteIDWebURL.NameOf(siteID) webURL, ok := backupSiteIDWebURL.NameOf(siteID)
if !ok { if !ok {
// This should not happen, but just in case // This should not happen, but just in case
logger.Ctx(ctx).With("site_id", siteID).Info("site weburl not found, using site id") logger.Ctx(ictx).With("site_id", siteID).Info("site weburl not found, using site id")
} }
siteName, err = getSiteName(ctx, siteID, webURL, ac.Sites(), webURLToSiteNames) siteName, err = getSiteName(ictx, siteID, webURL, ac.Sites(), webURLToSiteNames)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "getting site"). el.AddRecoverable(ictx, clues.Wrap(err, "getting site").
With("web_url", webURL, "site_id", siteID)) With("web_url", webURL, "site_id", siteID))
} else if len(siteName) == 0 { } else if len(siteName) == 0 {
// Site was deleted in between and restore and is not // Site was deleted in between and restore and is not
@ -95,7 +95,7 @@ func ConsumeRestoreCollections(
Selector: rcc.Selector, Selector: rcc.Selector,
} }
err = caches.Populate(ctx, lrh, srcc.ProtectedResource.ID()) err = caches.Populate(ictx, lrh, srcc.ProtectedResource.ID())
if err != nil { if err != nil {
return nil, clues.Wrap(err, "initializing restore caches") return nil, clues.Wrap(err, "initializing restore caches")
} }
@ -112,17 +112,16 @@ func ConsumeRestoreCollections(
ctr) ctr)
case path.ChannelMessagesCategory: case path.ChannelMessagesCategory:
// Message cannot be restored as of now using Graph API. // Message cannot be restored as of now using Graph API.
logger.Ctx(ctx).Debug("Skipping restore for channel messages") logger.Ctx(ictx).Debug("Skipping restore for channel messages")
default: default:
return nil, clues.New("data category not supported"). return nil, clues.NewWC(ictx, "data category not supported").
With("category", category). With("category", category)
WithClues(ictx)
} }
restoreMetrics = support.CombineMetrics(restoreMetrics, metrics) restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)
if err != nil { if err != nil {
el.AddRecoverable(ctx, err) el.AddRecoverable(ictx, err)
} }
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {

View File

@ -28,7 +28,7 @@ func ProduceBackupCollections(
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) { ) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
odb, err := bpc.Selector.ToOneDriveBackup() odb, err := bpc.Selector.ToOneDriveBackup()
if err != nil { if err != nil {
return nil, nil, false, clues.Wrap(err, "parsing selector").WithClues(ctx) return nil, nil, false, clues.WrapWC(ctx, err, "parsing selector")
} }
var ( var (

View File

@ -49,7 +49,7 @@ func (h *baseOnedriveHandler) ProduceExportCollections(
for _, dc := range dcs { for _, dc := range dcs {
drivePath, err := path.ToDrivePath(dc.FullPath()) drivePath, err := path.ToDrivePath(dc.FullPath())
if err != nil { if err != nil {
return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "transforming path to drive path")
} }
baseDir := path.Builder{}.Append(drivePath.Folders...) baseDir := path.Builder{}.Append(drivePath.Folders...)

View File

@ -185,13 +185,13 @@ func RestoreSitePage(
byteArray, err := io.ReadAll(itemData.ToReader()) byteArray, err := io.ReadAll(itemData.ToReader())
if err != nil { if err != nil {
return dii, clues.Wrap(err, "reading sharepoint data").WithClues(ctx) return dii, clues.WrapWC(ctx, err, "reading sharepoint data")
} }
// Hydrate Page // Hydrate Page
page, err := CreatePageFromBytes(byteArray) page, err := CreatePageFromBytes(byteArray)
if err != nil { if err != nil {
return dii, clues.Wrap(err, "creating Page object").WithClues(ctx) return dii, clues.WrapWC(ctx, err, "creating Page object")
} }
name, ok := ptr.ValOK(page.GetName()) name, ok := ptr.ValOK(page.GetName())
@ -217,7 +217,7 @@ func RestoreSitePage(
// Publish page to make visible // Publish page to make visible
// See https://learn.microsoft.com/en-us/graph/api/sitepage-publish?view=graph-rest-beta // See https://learn.microsoft.com/en-us/graph/api/sitepage-publish?view=graph-rest-beta
if restoredPage.GetWebUrl() == nil { if restoredPage.GetWebUrl() == nil {
return dii, clues.New("webURL not populated during page creation").WithClues(ctx) return dii, clues.NewWC(ctx, "webURL not populated during page creation")
} }
err = service.Client(). err = service.Client().

View File

@ -62,7 +62,7 @@ func (h *baseSharepointHandler) ProduceExportCollections(
for _, dc := range dcs { for _, dc := range dcs {
drivePath, err := path.ToDrivePath(dc.FullPath()) drivePath, err := path.ToDrivePath(dc.FullPath())
if err != nil { if err != nil {
return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "transforming path to drive path")
} }
driveName, ok := h.backupDriveIDNames.NameOf(drivePath.DriveID) driveName, ok := h.backupDriveIDNames.NameOf(drivePath.DriveID)

View File

@ -591,7 +591,7 @@ func consumeBackupCollections(
"kopia_expected_ignored_errors", kopiaStats.ExpectedIgnoredErrorCount) "kopia_expected_ignored_errors", kopiaStats.ExpectedIgnoredErrorCount)
if kopiaStats.ErrorCount > 0 { if kopiaStats.ErrorCount > 0 {
err = clues.New("building kopia snapshot").WithClues(ctx) err = clues.NewWC(ctx, "building kopia snapshot")
} else if kopiaStats.IgnoredErrorCount > kopiaStats.ExpectedIgnoredErrorCount { } else if kopiaStats.IgnoredErrorCount > kopiaStats.ExpectedIgnoredErrorCount {
logger.Ctx(ctx).Info("recoverable errors were seen during backup") logger.Ctx(ctx).Info("recoverable errors were seen during backup")
} }
@ -671,7 +671,7 @@ func mergeItemsFromBase(
errs) errs)
if err != nil { if err != nil {
return manifestAddedEntries, return manifestAddedEntries,
clues.New("fetching base details for backup").WithClues(ctx) clues.NewWC(ctx, "fetching base details for backup")
} }
for _, entry := range baseDeets.Items() { for _, entry := range baseDeets.Items() {
@ -681,8 +681,7 @@ func mergeItemsFromBase(
rr, err := path.FromDataLayerPath(entry.RepoRef, true) rr, err := path.FromDataLayerPath(entry.RepoRef, true)
if err != nil { if err != nil {
return manifestAddedEntries, clues.New("parsing base item info path"). return manifestAddedEntries, clues.NewWC(ctx, "parsing base item info path").
WithClues(ctx).
With("repo_ref", path.LoggableDir(entry.RepoRef)) With("repo_ref", path.LoggableDir(entry.RepoRef))
} }
@ -713,7 +712,7 @@ func mergeItemsFromBase(
baseBackup.Backup.Version) baseBackup.Backup.Version)
if err != nil { if err != nil {
return manifestAddedEntries, return manifestAddedEntries,
clues.Wrap(err, "getting updated info for entry").WithClues(ictx) clues.WrapWC(ictx, err, "getting updated info for entry")
} }
// This entry isn't merged. // This entry isn't merged.
@ -731,7 +730,7 @@ func mergeItemsFromBase(
item) item)
if err != nil { if err != nil {
return manifestAddedEntries, return manifestAddedEntries,
clues.Wrap(err, "adding item to details").WithClues(ictx) clues.WrapWC(ictx, err, "adding item to details")
} }
// Make sure we won't add this again in another base. // Make sure we won't add this again in another base.
@ -836,8 +835,7 @@ func mergeDetails(
checkCount := dataFromBackup.ItemsToMerge() checkCount := dataFromBackup.ItemsToMerge()
if addedEntries != checkCount { if addedEntries != checkCount {
return clues.New("incomplete migration of backup details"). return clues.NewWC(ctx, "incomplete migration of backup details").
WithClues(ctx).
With( With(
"item_count", addedEntries, "item_count", addedEntries,
"expected_item_count", checkCount) "expected_item_count", checkCount)
@ -918,32 +916,32 @@ func (op *BackupOperation) createBackupModels(
// during the operation, regardless of the failure policy. Unlikely we'd // during the operation, regardless of the failure policy. Unlikely we'd
// hit this here as the preceding code should already take care of it. // hit this here as the preceding code should already take care of it.
if op.Errors.Failure() != nil { if op.Errors.Failure() != nil {
return clues.Wrap(op.Errors.Failure(), "non-recoverable failure").WithClues(ctx) return clues.WrapWC(ctx, op.Errors.Failure(), "non-recoverable failure")
} }
if deets == nil { if deets == nil {
return clues.New("no backup details to record").WithClues(ctx) return clues.NewWC(ctx, "no backup details to record")
} }
ctx = clues.Add(ctx, "details_entry_count", len(deets.Entries)) ctx = clues.Add(ctx, "details_entry_count", len(deets.Entries))
if len(snapID) == 0 { if len(snapID) == 0 {
return clues.New("no snapshot ID to record").WithClues(ctx) return clues.NewWC(ctx, "no snapshot ID to record")
} }
err := sscw.Collect(ctx, streamstore.DetailsCollector(deets)) err := sscw.Collect(ctx, streamstore.DetailsCollector(deets))
if err != nil { if err != nil {
return clues.Wrap(err, "collecting details for persistence").WithClues(ctx) return clues.Wrap(err, "collecting details for persistence")
} }
err = sscw.Collect(ctx, streamstore.FaultErrorsCollector(op.Errors.Errors())) err = sscw.Collect(ctx, streamstore.FaultErrorsCollector(op.Errors.Errors()))
if err != nil { if err != nil {
return clues.Wrap(err, "collecting errors for persistence").WithClues(ctx) return clues.Wrap(err, "collecting errors for persistence")
} }
ssid, err := sscw.Write(ctx, errs) ssid, err := sscw.Write(ctx, errs)
if err != nil { if err != nil {
return clues.Wrap(err, "persisting details and errors").WithClues(ctx) return clues.Wrap(err, "persisting details and errors")
} }
ctx = clues.Add(ctx, "streamstore_snapshot_id", ssid) ctx = clues.Add(ctx, "streamstore_snapshot_id", ssid)
@ -967,7 +965,7 @@ func (op *BackupOperation) createBackupModels(
ssid, ssid,
op.Options.FailureHandling, op.Options.FailureHandling,
op.Errors) { op.Errors) {
return clues.New("failed preview backup").WithClues(ctx) return clues.NewWC(ctx, "failed preview backup")
} }
tags[model.BackupTypeTag] = model.PreviewBackup tags[model.BackupTypeTag] = model.PreviewBackup
@ -988,13 +986,12 @@ func (op *BackupOperation) createBackupModels(
tags[model.BackupTypeTag] = model.AssistBackup tags[model.BackupTypeTag] = model.AssistBackup
default: default:
return clues.New("unable to determine backup type due to operation errors"). return clues.NewWC(ctx, "unable to determine backup type due to operation errors")
WithClues(ctx)
} }
// Additional defensive check to make sure we tag things as expected above. // Additional defensive check to make sure we tag things as expected above.
if len(tags[model.BackupTypeTag]) == 0 { if len(tags[model.BackupTypeTag]) == 0 {
return clues.New("empty backup type tag").WithClues(ctx) return clues.NewWC(ctx, "empty backup type tag")
} }
ctx = clues.Add(ctx, model.BackupTypeTag, tags[model.BackupTypeTag]) ctx = clues.Add(ctx, model.BackupTypeTag, tags[model.BackupTypeTag])
@ -1015,7 +1012,7 @@ func (op *BackupOperation) createBackupModels(
logger.Ctx(ctx).Info("creating new backup") logger.Ctx(ctx).Info("creating new backup")
if err = op.store.Put(ctx, model.BackupSchema, b); err != nil { if err = op.store.Put(ctx, model.BackupSchema, b); err != nil {
return clues.Wrap(err, "creating backup model").WithClues(ctx) return clues.Wrap(err, "creating backup model")
} }
return nil return nil
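
Several hunks above drop WithClues entirely instead of switching to a WC variant. That matches the PR note about producers that already packed the context: when the callee received the same ctx and returns a clues-built error, re-attaching the values adds nothing. A hedged sketch, with collect standing in for callees like sscw.Collect:

package example

import (
	"context"

	"github.com/alcionai/clues"
)

// collect stands in for a callee that already packs the ctx it was
// handed into the errors it returns.
func collect(ctx context.Context) error {
	return clues.NewWC(ctx, "marshalling details")
}

func persist(ctx context.Context) error {
	if err := collect(ctx); err != nil {
		// plain Wrap: the producer already attached these ctx values,
		// so a WC call here would only duplicate them.
		return clues.Wrap(err, "collecting details for persistence")
	}

	return nil
}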

View File

@ -50,7 +50,7 @@ func getDetailsFromBackup(
} }
if len(ssid) == 0 { if len(ssid) == 0 {
return nil, clues.New("no details or errors in backup").WithClues(ctx) return nil, clues.NewWC(ctx, "no details or errors in backup")
} }
if err := detailsStore.Read(ctx, ssid, umt, errs); err != nil { if err := detailsStore.Read(ctx, ssid, umt, errs); err != nil {

View File

@ -115,8 +115,7 @@ func makeRestorePathsForEntry(
repoRef, err := path.FromDataLayerPath(ent.RepoRef, true) repoRef, err := path.FromDataLayerPath(ent.RepoRef, true)
if err != nil { if err != nil {
err = clues.Wrap(err, "parsing RepoRef"). err = clues.WrapWC(ctx, err, "parsing RepoRef").
WithClues(ctx).
With("repo_ref", clues.Hide(ent.RepoRef), "location_ref", clues.Hide(ent.LocationRef)) With("repo_ref", clues.Hide(ent.RepoRef), "location_ref", clues.Hide(ent.LocationRef))
return res, err return res, err
@ -128,8 +127,7 @@ func makeRestorePathsForEntry(
// Get the LocationRef so we can munge it onto our path. // Get the LocationRef so we can munge it onto our path.
locRef, err := locationRef(ent, repoRef, backupVersion) locRef, err := locationRef(ent, repoRef, backupVersion)
if err != nil { if err != nil {
err = clues.Wrap(err, "parsing LocationRef after reduction"). err = clues.WrapWC(ctx, err, "parsing LocationRef after reduction").
WithClues(ctx).
With("location_ref", clues.Hide(ent.LocationRef)) With("location_ref", clues.Hide(ent.LocationRef))
return res, err return res, err
@ -154,11 +152,11 @@ func makeRestorePathsForEntry(
(ent.Groups != nil && ent.Groups.ItemType == details.SharePointLibrary): (ent.Groups != nil && ent.Groups.ItemType == details.SharePointLibrary):
res.RestorePath, err = drivePathMerge(ent, repoRef, locRef) res.RestorePath, err = drivePathMerge(ent, repoRef, locRef)
default: default:
return res, clues.New("unknown entry type").WithClues(ctx) return res, clues.NewWC(ctx, "unknown entry type")
} }
if err != nil { if err != nil {
return res, clues.Wrap(err, "generating RestorePath").WithClues(ctx) return res, clues.WrapWC(ctx, err, "generating RestorePath")
} }
return res, nil return res, nil

View File

@ -243,13 +243,11 @@ func (op *RestoreOperation) do(
op.Selectors.PathService(), op.Selectors.PathService(),
restoreToProtectedResource.ID()) restoreToProtectedResource.ID())
if err != nil { if err != nil {
return nil, clues.Wrap(err, "verifying service restore is enabled").WithClues(ctx) return nil, clues.Wrap(err, "verifying service restore is enabled")
} }
if !enabled { if !enabled {
return nil, clues.Wrap( return nil, clues.WrapWC(ctx, graph.ErrServiceNotEnabled, "service not enabled for restore")
graph.ErrServiceNotEnabled,
"service not enabled for restore").WithClues(ctx)
} }
observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(restoreToProtectedResource.Name())) observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(restoreToProtectedResource.Name()))

View File

@ -42,11 +42,11 @@ func (ms Streamer) Read(
case streamstore.FaultErrorsType: case streamstore.FaultErrorsType:
mr = ms.Errors[snapshotID] mr = ms.Errors[snapshotID]
default: default:
return clues.New("unknown type: " + col.Type).WithClues(ctx) return clues.NewWC(ctx, "unknown type: "+col.Type)
} }
if mr == nil { if mr == nil {
return clues.New("collectable " + col.Type + " has no marshaller").WithClues(ctx) return clues.NewWC(ctx, "collectable "+col.Type+" has no marshaller")
} }
bs, err := mr.Marshal() bs, err := mr.Marshal()

View File

@ -173,14 +173,14 @@ func collect(
// construct the path of the container // construct the path of the container
p, err := path.Builder{}.ToStreamStorePath(tenantID, col.purpose, service, false) p, err := path.Builder{}.ToStreamStorePath(tenantID, col.purpose, service, false)
if err != nil { if err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.StackWC(ctx, err)
} }
// TODO: We could use an io.Pipe here to avoid a double copy but that // TODO: We could use an io.Pipe here to avoid a double copy but that
// makes error handling a bit complicated // makes error handling a bit complicated
bs, err := col.mr.Marshal() bs, err := col.mr.Marshal()
if err != nil { if err != nil {
return nil, clues.Wrap(err, "marshalling body").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "marshalling body")
} }
item, err := data.NewPrefetchedItem( item, err := data.NewPrefetchedItem(
@ -188,7 +188,7 @@ func collect(
col.itemName, col.itemName,
time.Now()) time.Now())
if err != nil { if err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.StackWC(ctx, err)
} }
dc := streamCollection{ dc := streamCollection{
@ -240,12 +240,12 @@ func read(
Append(col.itemName). Append(col.itemName).
ToStreamStorePath(tenantID, col.purpose, service, true) ToStreamStorePath(tenantID, col.purpose, service, true)
if err != nil { if err != nil {
return clues.Stack(err).WithClues(ctx) return clues.StackWC(ctx, err)
} }
pd, err := p.Dir() pd, err := p.Dir()
if err != nil { if err != nil {
return clues.Stack(err).WithClues(ctx) return clues.StackWC(ctx, err)
} }
ctx = clues.Add(ctx, "snapshot_id", snapshotID) ctx = clues.Add(ctx, "snapshot_id", snapshotID)
@ -267,8 +267,7 @@ func read(
// Expect only 1 data collection // Expect only 1 data collection
if len(cs) != 1 { if len(cs) != 1 {
return clues.New("unexpected collection count"). return clues.NewWC(ctx, "unexpected collection count").
WithClues(ctx).
With("collection_count", len(cs)) With("collection_count", len(cs))
} }
@ -281,19 +280,19 @@ func read(
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return clues.New("context cancelled waiting for data").WithClues(ctx) return clues.NewWC(ctx, "context cancelled waiting for data")
case itemData, ok := <-items: case itemData, ok := <-items:
if !ok { if !ok {
if !found { if !found {
return clues.New("no data found").WithClues(ctx) return clues.NewWC(ctx, "no data found")
} }
return nil return nil
} }
if err := col.Unmr(itemData.ToReader()); err != nil { if err := col.Unmr(itemData.ToReader()); err != nil {
return clues.Wrap(err, "unmarshalling data").WithClues(ctx) return clues.WrapWC(ctx, err, "unmarshalling data")
} }
found = true found = true

View File

@ -28,15 +28,15 @@ func ConsumeExportCollections(
folder := filepath.Join(exportLocation, col.BasePath()) folder := filepath.Join(exportLocation, col.BasePath())
ictx := clues.Add(ctx, "dir_name", folder) ictx := clues.Add(ctx, "dir_name", folder)
for item := range col.Items(ctx) { for item := range col.Items(ictx) {
if item.Error != nil { if item.Error != nil {
el.AddRecoverable(ictx, clues.Wrap(item.Error, "getting item").WithClues(ctx)) el.AddRecoverable(ictx, clues.Wrap(item.Error, "getting item"))
} }
if err := writeItem(ictx, item, folder); err != nil { if err := writeItem(ictx, item, folder); err != nil {
el.AddRecoverable( el.AddRecoverable(
ictx, ictx,
clues.Wrap(err, "writing item").With("file_name", item.Name).WithClues(ctx)) clues.Wrap(err, "writing item").With("file_name", item.Name))
} }
} }
} }
@ -60,19 +60,19 @@ func writeItem(ctx context.Context, item Item, folder string) error {
err := os.MkdirAll(folder, os.ModePerm) err := os.MkdirAll(folder, os.ModePerm)
if err != nil { if err != nil {
return clues.Wrap(err, "creating directory") return clues.WrapWC(ctx, err, "creating directory")
} }
// In case the user tries to restore to a non-clean // In case the user tries to restore to a non-clean
// directory, we might run into collisions and fail. // directory, we might run into collisions and fail.
f, err := os.Create(fpath) f, err := os.Create(fpath)
if err != nil { if err != nil {
return clues.Wrap(err, "creating file") return clues.WrapWC(ctx, err, "creating file")
} }
_, err = io.Copy(f, progReader) _, err = io.Copy(f, progReader)
if err != nil { if err != nil {
return clues.Wrap(err, "writing data") return clues.WrapWC(ctx, err, "writing data")
} }
return nil return nil
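
The col.Items(ctx) to col.Items(ictx) change above is one of the loop fixes called out in the description: once a per-iteration context exists, everything in the body should use it. A sketch of the convention, with writeFolder as an illustrative callee that packs ctx itself:

package example

import (
	"context"

	"github.com/alcionai/clues"
)

// writeFolder stands in for a callee (like writeItem above) that packs
// the ctx it receives into its own errors.
func writeFolder(ctx context.Context, folder string) error {
	return nil
}

func exportAll(ctx context.Context, folders []string) error {
	for _, folder := range folders {
		// derive a per-iteration context carrying this folder's values
		ictx := clues.Add(ctx, "dir_name", folder)

		// use ictx, not the outer ctx, inside the body so errors and
		// logs are tagged with this iteration's clues
		if err := writeFolder(ictx, folder); err != nil {
			return clues.Wrap(err, "writing folder")
		}
	}

	return nil
}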

View File

@ -137,7 +137,7 @@ func Example_logger_clues_standards() {
// 2. The last func to handle a context must add the clues to the error. // 2. The last func to handle a context must add the clues to the error.
// //
// preferred // preferred
err := clues.Wrap(err, "reason").WithClues(ctx) err := clues.WrapWC(ctx, err, "reason")
// this dereference added for linter happiness // this dereference added for linter happiness
_ = err _ = err
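
A slightly fuller sketch of the standard this example documents, showing why the packing matters (doThing is hypothetical):

package example

import (
	"context"

	"github.com/alcionai/clues"
)

func doThing(ctx context.Context) error { return nil }

func handler(ctx context.Context) error {
	ctx = clues.Add(ctx, "item_id", 42)

	if err := doThing(ctx); err != nil {
		// the ctx values only travel upstream if the last func to
		// handle the context packs them into the error
		return clues.WrapWC(ctx, err, "doing the thing")
	}

	return nil
}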

View File

@ -210,7 +210,7 @@ func getBackupDetails(
} }
if len(ssid) == 0 { if len(ssid) == 0 {
return nil, b, clues.New("no streamstore id in backup").WithClues(ctx) return nil, b, clues.NewWC(ctx, "no streamstore id in backup")
} }
var ( var (
@ -276,7 +276,7 @@ func getBackupErrors(
ssid := b.StreamStoreID ssid := b.StreamStoreID
if len(ssid) == 0 { if len(ssid) == 0 {
return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx) return nil, b, clues.NewWC(ctx, "missing streamstore id in backup")
} }
var ( var (
@ -335,9 +335,7 @@ func deleteBackups(
continue continue
} }
return clues.Stack(errWrapper(err)). return clues.StackWC(ctx, errWrapper(err)).With("delete_backup_id", id)
WithClues(ctx).
With("delete_backup_id", id)
} }
toDelete = append(toDelete, b.ModelStoreID) toDelete = append(toDelete, b.ModelStoreID)

View File

@ -57,7 +57,7 @@ func (r *repository) ConnectDataProvider(
case account.ProviderM365: case account.ProviderM365:
provider, err = connectToM365(ctx, *r, pst) provider, err = connectToM365(ctx, *r, pst)
default: default:
err = clues.New("unrecognized provider").WithClues(ctx) err = clues.NewWC(ctx, "unrecognized provider")
} }
if err != nil { if err != nil {

View File

@ -101,7 +101,7 @@ func New(
bus, err := events.NewBus(ctx, st, acct.ID(), opts) bus, err := events.NewBus(ctx, st, acct.ID(), opts)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "constructing event bus").WithClues(ctx) return nil, clues.WrapWC(ctx, err, "constructing event bus")
} }
repoID := configFileRepoID repoID := configFileRepoID
@ -310,7 +310,7 @@ func (r *repository) setupKopia(
if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts, repoHashName); err != nil { if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts, repoHashName); err != nil {
// Replace common internal errors so that SDK users can check results with errors.Is() // Replace common internal errors so that SDK users can check results with errors.Is()
if errors.Is(err, kopia.ErrorRepoAlreadyExists) { if errors.Is(err, kopia.ErrorRepoAlreadyExists) {
return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx) return clues.Stack(ErrorRepoAlreadyExists, err)
} }
return clues.Wrap(err, "initializing kopia") return clues.Wrap(err, "initializing kopia")
@ -326,12 +326,12 @@ func (r *repository) setupKopia(
r.dataLayer, err = kopia.NewWrapper(kopiaRef) r.dataLayer, err = kopia.NewWrapper(kopiaRef)
if err != nil { if err != nil {
return clues.Stack(err).WithClues(ctx) return clues.StackWC(ctx, err)
} }
r.modelStore, err = kopia.NewModelStore(kopiaRef) r.modelStore, err = kopia.NewModelStore(kopiaRef)
if err != nil { if err != nil {
return clues.Stack(err).WithClues(ctx) return clues.StackWC(ctx, err)
} }
if r.ID == events.RepoIDNotFound { if r.ID == events.RepoIDNotFound {

View File

@ -540,7 +540,7 @@ func reduce[T scopeT, C categoryT](
repoPath, err := path.FromDataLayerPath(ent.RepoRef, true) repoPath, err := path.FromDataLayerPath(ent.RepoRef, true)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "transforming repoRef to path").WithClues(ictx)) el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "transforming repoRef to path"))
continue continue
} }
@ -563,7 +563,7 @@ func reduce[T scopeT, C categoryT](
pv, err := dc.pathValues(repoPath, *ent, s.Cfg) pv, err := dc.pathValues(repoPath, *ent, s.Cfg)
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "getting path values").WithClues(ictx)) el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "getting path values"))
continue continue
} }
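
The reduce fixes above correct both halves of the recoverable-error pattern: the bus call and the error itself should carry the iteration context. A sketch, with bus as a stand-in for the fault bus's surface:

package example

import (
	"context"

	"github.com/alcionai/clues"
)

// bus is a stand-in for the fault bus's recoverable-error method.
type bus interface {
	AddRecoverable(ctx context.Context, err error)
}

func handle(ctx context.Context, ent string) error { return nil }

func reduceAll(ctx context.Context, ents []string, el bus) {
	for _, ent := range ents {
		ictx := clues.Add(ctx, "entry", ent)

		if err := handle(ictx, ent); err != nil {
			// pass ictx to the bus and pack it into the error, so the
			// recoverable failure is tagged with this iteration's values
			el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "handling entry"))
		}
	}
}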

View File

@ -77,12 +77,12 @@ func (c Channels) GetChannelByName(
Channels(). Channels().
Get(ctx, options) Get(ctx, options)
if err != nil { if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx) return nil, graph.Stack(ctx, err)
} }
gv := resp.GetValue() gv := resp.GetValue()
if len(gv) == 0 { if len(gv) == 0 {
return nil, clues.New("channel not found").WithClues(ctx) return nil, clues.NewWC(ctx, "channel not found")
} }
// We only allow the api to match one channel with the provided name. // We only allow the api to match one channel with the provided name.
@ -93,7 +93,7 @@ func (c Channels) GetChannelByName(
cal := gv[0] cal := gv[0]
if err := checkIDAndName(cal); err != nil { if err := checkIDAndName(cal); err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.StackWC(ctx, err)
} }
return cal, nil return cal, nil

View File

@ -128,29 +128,28 @@ func (c Contacts) GetContainerByName(
ContactFolders(). ContactFolders().
Get(ctx, options) Get(ctx, options)
if err != nil { if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx) return nil, graph.Stack(ctx, err)
} }
gv := resp.GetValue() gv := resp.GetValue()
if len(gv) == 0 { if len(gv) == 0 {
return nil, clues.New("container not found").WithClues(ctx) return nil, clues.NewWC(ctx, "container not found")
} }
// We only allow the api to match one container with the provided name. // We only allow the api to match one container with the provided name.
// Return an error if multiple containers exist (unlikely) or if no container // Return an error if multiple containers exist (unlikely) or if no container
// is found. // is found.
if len(gv) != 1 { if len(gv) != 1 {
return nil, clues.Stack(graph.ErrMultipleResultsMatchIdentifier). return nil, clues.StackWC(ctx, graph.ErrMultipleResultsMatchIdentifier).
With("returned_container_count", len(gv)). With("returned_container_count", len(gv))
WithClues(ctx)
} }
// Sanity check ID and name // Sanity check ID and name
container := gv[0] container := gv[0]
if err := graph.CheckIDAndName(container); err != nil { if err := graph.CheckIDAndName(container); err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.StackWC(ctx, err)
} }
return container, nil return container, nil

View File

@ -137,13 +137,13 @@ func (c Events) GetContainerByName(
Calendars(). Calendars().
Get(ctx, options) Get(ctx, options)
if err != nil { if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx) return nil, graph.Stack(ctx, err)
} }
gv := resp.GetValue() gv := resp.GetValue()
if len(gv) == 0 { if len(gv) == 0 {
return nil, clues.New("container not found").WithClues(ctx) return nil, clues.NewWC(ctx, "container not found")
} }
// We only allow the api to match one calendar with the provided name. // We only allow the api to match one calendar with the provided name.
@ -155,7 +155,7 @@ func (c Events) GetContainerByName(
container := graph.CalendarDisplayable{Calendarable: cal} container := graph.CalendarDisplayable{Calendarable: cal}
if err := graph.CheckIDAndName(container); err != nil { if err := graph.CheckIDAndName(container); err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.StackWC(ctx, err)
} }
return container, nil return container, nil
@ -546,7 +546,7 @@ func (c Events) PostLargeAttachment(
_, err = io.CopyBuffer(w, bytes.NewReader(content), copyBuffer) _, err = io.CopyBuffer(w, bytes.NewReader(content), copyBuffer)
if err != nil { if err != nil {
return "", clues.Wrap(err, "buffering large attachment content").WithClues(ctx) return "", clues.WrapWC(ctx, err, "buffering large attachment content")
} }
return w.ID, nil return w.ID, nil

View File

@ -82,8 +82,8 @@ func BaseCollections(
full, err := path.BuildPrefix(tenant, rOwner, service, cat) full, err := path.BuildPrefix(tenant, rOwner, service, cat)
if err != nil { if err != nil {
// Shouldn't happen. // Shouldn't happen.
err = clues.Wrap(err, "making path").WithClues(ictx) err = clues.WrapWC(ictx, err, "making path")
el.AddRecoverable(ctx, err) el.AddRecoverable(ictx, err)
lastErr = err lastErr = err
continue continue

View File

@ -373,7 +373,7 @@ func Wrap(ctx context.Context, e error, msg string) *clues.Err {
var oDataError odataerrors.ODataErrorable var oDataError odataerrors.ODataErrorable
if !errors.As(e, &oDataError) { if !errors.As(e, &oDataError) {
return clues.Wrap(e, msg).WithClues(ctx).WithTrace(1) return clues.WrapWC(ctx, e, msg).WithTrace(1)
} }
mainMsg, data, innerMsg := errData(oDataError) mainMsg, data, innerMsg := errData(oDataError)
@ -382,7 +382,7 @@ func Wrap(ctx context.Context, e error, msg string) *clues.Err {
e = clues.Stack(e, clues.New(mainMsg)) e = clues.Stack(e, clues.New(mainMsg))
} }
ce := clues.Wrap(e, msg).WithClues(ctx).With(data...).WithTrace(1) ce := clues.WrapWC(ctx, e, msg).With(data...).WithTrace(1)
return setLabels(ce, innerMsg) return setLabels(ce, innerMsg)
} }
@ -396,7 +396,7 @@ func Stack(ctx context.Context, e error) *clues.Err {
var oDataError *odataerrors.ODataError var oDataError *odataerrors.ODataError
if !errors.As(e, &oDataError) { if !errors.As(e, &oDataError) {
return clues.Stack(e).WithClues(ctx).WithTrace(1) return clues.StackWC(ctx, e).WithTrace(1)
} }
mainMsg, data, innerMsg := errData(oDataError) mainMsg, data, innerMsg := errData(oDataError)
@ -405,7 +405,7 @@ func Stack(ctx context.Context, e error) *clues.Err {
e = clues.Stack(e, clues.New(mainMsg)) e = clues.Stack(e, clues.New(mainMsg))
} }
ce := clues.Stack(e).WithClues(ctx).With(data...).WithTrace(1) ce := clues.StackWC(ctx, e).With(data...).WithTrace(1)
return setLabels(ce, innerMsg) return setLabels(ce, innerMsg)
} }
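
The graph helpers keep their ODataError special-casing; only the constructor call flattens, and WithTrace(1) still chains after the WC call. Reduced to the non-OData path (a sketch, not the full helper; the depth argument is assumed to shift the recorded trace one frame up, to the caller):

package example

import (
	"context"

	"github.com/alcionai/clues"
)

// wrapAPI mirrors the shape of graph.Wrap for plain errors: pack the
// ctx, then chain WithTrace so the trace reflects the caller rather
// than this helper.
func wrapAPI(ctx context.Context, e error, msg string) *clues.Err {
	return clues.WrapWC(ctx, e, msg).WithTrace(1)
}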

View File

@ -227,7 +227,7 @@ func (mw RetryMiddleware) retryRequest(
case <-ctx.Done(): case <-ctx.Done():
// Don't retry if the context is marked as done, it will just error out // Don't retry if the context is marked as done, it will just error out
// when we attempt to send the retry anyway. // when we attempt to send the retry anyway.
return resp, clues.Stack(ctx.Err()).WithClues(ctx) return resp, clues.StackWC(ctx, ctx.Err())
case <-timer.C: case <-timer.C:
} }

View File

@ -365,17 +365,17 @@ func (aw *adapterWrap) Send(
for i := 0; i < aw.config.maxConnectionRetries+1; i++ { for i := 0; i < aw.config.maxConnectionRetries+1; i++ {
ictx := clues.Add(ctx, "request_retry_iter", i) ictx := clues.Add(ctx, "request_retry_iter", i)
sp, err = aw.RequestAdapter.Send(ctx, requestInfo, constructor, errorMappings) sp, err = aw.RequestAdapter.Send(ictx, requestInfo, constructor, errorMappings)
if err == nil { if err == nil {
break break
} }
if IsErrApplicationThrottled(err) { if IsErrApplicationThrottled(err) {
return nil, clues.Stack(ErrApplicationThrottled, err).WithTrace(1).WithClues(ictx) return nil, clues.StackWC(ictx, ErrApplicationThrottled, err).WithTrace(1)
} }
if !IsErrConnectionReset(err) && !connectionEnded.Compare(err.Error()) { if !IsErrConnectionReset(err) && !connectionEnded.Compare(err.Error()) {
return nil, clues.Stack(err).WithTrace(1).WithClues(ictx) return nil, clues.StackWC(ictx, err).WithTrace(1)
} }
logger.Ctx(ictx).Debug("http connection error") logger.Ctx(ictx).Debug("http connection error")

View File

@ -185,9 +185,9 @@ func getGroupFromResponse(ctx context.Context, resp models.GroupCollectionRespon
vs := resp.GetValue() vs := resp.GetValue()
if len(vs) == 0 { if len(vs) == 0 {
return nil, clues.Stack(graph.ErrResourceOwnerNotFound).WithClues(ctx) return nil, clues.StackWC(ctx, graph.ErrResourceOwnerNotFound)
} else if len(vs) > 1 { } else if len(vs) > 1 {
return nil, clues.Stack(graph.ErrMultipleResultsMatchIdentifier).WithClues(ctx) return nil, clues.StackWC(ctx, graph.ErrMultipleResultsMatchIdentifier)
} }
return vs[0], nil return vs[0], nil
@ -216,7 +216,7 @@ func (c Groups) GetAllSites(
identifier, identifier,
CallConfig{}) CallConfig{})
if err != nil { if err != nil {
return nil, clues.Wrap(err, "getting group").WithClues(ctx) return nil, clues.Wrap(err, "getting group")
} }
isTeam := IsTeam(ctx, group) isTeam := IsTeam(ctx, group)
@ -256,8 +256,7 @@ func (c Groups) GetAllSites(
FilesFolder(). FilesFolder().
Get(ictx, nil) Get(ictx, nil)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "getting files folder for channel"). return nil, clues.WrapWC(ictx, err, "getting files folder for channel")
WithClues(ictx)
} }
// WebURL returned here is the url to the documents folder, we // WebURL returned here is the url to the documents folder, we
@ -267,8 +266,7 @@ func (c Groups) GetAllSites(
u, err := url.Parse(documentWebURL) u, err := url.Parse(documentWebURL)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "parsing document web url"). return nil, clues.WrapWC(ictx, err, "parsing document web url")
WithClues(ictx)
} }
pathSegments := strings.Split(u.Path, "/") // pathSegments[0] == "" pathSegments := strings.Split(u.Path, "/") // pathSegments[0] == ""
@ -278,7 +276,7 @@ func (c Groups) GetAllSites(
site, err := Sites(c).GetByID(ictx, siteWebURL, CallConfig{}) site, err := Sites(c).GetByID(ictx, siteWebURL, CallConfig{})
if err != nil { if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "getting site")) el.AddRecoverable(ictx, clues.Wrap(err, "getting site"))
continue continue
} }

View File

@ -47,7 +47,7 @@ func (c Lists) PostDrive(
newList, err := builder.Post(ctx, list, nil) newList, err := builder.Post(ctx, list, nil)
if graph.IsErrItemAlreadyExistsConflict(err) { if graph.IsErrItemAlreadyExistsConflict(err) {
return nil, clues.Stack(graph.ErrItemAlreadyExistsConflict, err).WithClues(ctx) return nil, clues.StackWC(ctx, graph.ErrItemAlreadyExistsConflict, err)
} }
if err != nil { if err != nil {

View File

@ -156,29 +156,28 @@ func (c Mail) GetContainerByName(
} }
if err != nil { if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx) return nil, graph.Stack(ctx, err)
} }
gv := resp.GetValue() gv := resp.GetValue()
if len(gv) == 0 { if len(gv) == 0 {
return nil, clues.New("container not found").WithClues(ctx) return nil, clues.NewWC(ctx, "container not found")
} }
// We only allow the api to match one container with the provided name. // We only allow the api to match one container with the provided name.
// Return an error if multiple containers exist (unlikely) or if no container // Return an error if multiple containers exist (unlikely) or if no container
// is found. // is found.
if len(gv) != 1 { if len(gv) != 1 {
return nil, clues.Stack(graph.ErrMultipleResultsMatchIdentifier). return nil, clues.StackWC(ctx, graph.ErrMultipleResultsMatchIdentifier).
With("returned_container_count", len(gv)). With("returned_container_count", len(gv))
WithClues(ctx)
} }
// Sanity check ID and name // Sanity check ID and name
container := gv[0] container := gv[0]
if err := graph.CheckIDAndName(container); err != nil { if err := graph.CheckIDAndName(container); err != nil {
return nil, clues.Stack(err).WithClues(ctx) return nil, clues.StackWC(ctx, err)
} }
return container, nil return container, nil
@ -406,7 +405,7 @@ func (c Mail) PostItem(
} }
if itm == nil { if itm == nil {
return nil, clues.New("nil response mail message creation").WithClues(ctx) return nil, clues.NewWC(ctx, "nil response mail message creation")
} }
return itm, nil return itm, nil
@ -513,7 +512,7 @@ func (c Mail) PostLargeAttachment(
_, err = io.CopyBuffer(w, bytes.NewReader(content), copyBuffer) _, err = io.CopyBuffer(w, bytes.NewReader(content), copyBuffer)
if err != nil { if err != nil {
return "", clues.Wrap(err, "buffering large attachment content").WithClues(ctx) return "", clues.WrapWC(ctx, err, "buffering large attachment content")
} }
return w.ID, nil return w.ID, nil

View File

@ -453,8 +453,7 @@ func batchWithMaxItemCount[T any](
// cancel the pager because it should see the context cancellation once we // cancel the pager because it should see the context cancellation once we
// stop attempting to fetch the next page. // stop attempting to fetch the next page.
if ctx.Err() != nil { if ctx.Err() != nil {
return nil, nil, DeltaUpdate{}, clues.Stack(ctx.Err(), context.Cause(ctx)). return nil, nil, DeltaUpdate{}, clues.StackWC(ctx, ctx.Err(), context.Cause(ctx))
WithClues(ctx)
} }
// Get the next page first thing in the loop instead of last thing so we // Get the next page first thing in the loop instead of last thing so we

View File

@ -34,7 +34,7 @@ func makeAC(
creds, err := acct.M365Config() creds, err := acct.M365Config()
if err != nil { if err != nil {
return api.Client{}, clues.Wrap(err, "getting m365 account creds").WithClues(ctx) return api.Client{}, clues.WrapWC(ctx, err, "getting m365 account creds")
} }
cli, err := api.NewClient( cli, err := api.NewClient(
@ -42,7 +42,7 @@ func makeAC(
control.DefaultOptions(), control.DefaultOptions(),
count.New()) count.New())
if err != nil { if err != nil {
return api.Client{}, clues.Wrap(err, "constructing api client").WithClues(ctx) return api.Client{}, clues.WrapWC(ctx, err, "constructing api client")
} }
return cli, nil return cli, nil

View File

@ -108,7 +108,7 @@ func usersNoInfo(ctx context.Context, acct account.Account, errs *fault.Bus) ([]
func UserAssignedLicenses(ctx context.Context, acct account.Account, userID string) (int, error) { func UserAssignedLicenses(ctx context.Context, acct account.Account, userID string) (int, error) {
ac, err := makeAC(ctx, acct, path.UnknownService) ac, err := makeAC(ctx, acct, path.UnknownService)
if err != nil { if err != nil {
return 0, clues.Stack(err).WithClues(ctx) return 0, clues.Stack(err)
} }
us, err := ac.Users().GetByID( us, err := ac.Users().GetByID(