use clues New|Wrap|StackWC() (#4684)
Replace all cases of `New|Wrap|Stack(...).WithClues(ctx)` with the flattened `NewWC|WrapWC|StackWC(ctx, ...)` functions introduced in the latest clues bump.

Other changes:
* Remove `WithClues` builders when the error producer already called it.
* Correct some usages of `ictx` within loops.

No logic changes, just cleanup.

---

#### Does this PR need a docs update or release note?

- [x] ⛔ No

#### Type of change

- [x] 🧹 Tech Debt/Cleanup

#### Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
This commit is contained in:
parent ea2bf19bd1
commit 4c72e9eab7
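For reviewers unfamiliar with the clues API, here is a minimal sketch of the rewrite this PR applies everywhere. The `clues` calls mirror the API used throughout the diff; the `before`/`after` functions and `errBoom` are hypothetical, and the import path is assumed for the clues library:

```go
package example

import (
	"context"
	"errors"

	"github.com/alcionai/clues" // assumed import path for the clues library
)

var errBoom = errors.New("boom")

// before: build the error, then bolt ctx values on with the WithClues builder.
func before(ctx context.Context) error {
	return clues.Wrap(errBoom, "doing thing").
		With("attempt", 1).
		WithClues(ctx)
}

// after: the flattened *WC constructors pack ctx in at construction time.
// The same rewrite applies to the other builders:
//   clues.New("msg").WithClues(ctx) -> clues.NewWC(ctx, "msg")
//   clues.Stack(err).WithClues(ctx) -> clues.StackWC(ctx, err)
func after(ctx context.Context) error {
	return clues.WrapWC(ctx, errBoom, "doing thing").
		With("attempt", 1)
}
```

The `With(...)` chaining is unchanged; only the ctx attachment moves from a trailing `WithClues(ctx)` into the constructor.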
@@ -53,6 +53,9 @@ linters-settings:
 # Prefer suite.Run(name, func() {}) for subtests as testify has it instead
 # of suite.T().Run(name, func(t *testing.T) {}).
 - '(T\(\)|\st[a-zA-Z0-9]*)\.Run(# prefer testify suite.Run(name, func()) )?'
+# Prefer packing ctx values into the error using NewWC, WrapWC, or StackWC
+# instead of New|Stack|Wrap().WithClues(ctx)
+- 'WithClues(# prefer the builderWC variant - ex: StackWC(ctx, ...))?'
 lll:
 line-length: 120
 revive:
@@ -187,7 +187,7 @@ func genericCreateCommand(

 bo, err := r.NewBackupWithLookup(ictx, discSel, ins)
 if err != nil {
-errs = append(errs, clues.Wrap(err, owner).WithClues(ictx))
+errs = append(errs, clues.WrapWC(ictx, err, owner))
 Errf(ictx, "%v\n", err)

 continue
@@ -208,7 +208,7 @@ func genericCreateCommand(
 continue
 }

-errs = append(errs, clues.Wrap(err, owner).WithClues(ictx))
+errs = append(errs, clues.WrapWC(ictx, err, owner))
 Errf(ictx, "%v\n", err)

 continue
@@ -33,14 +33,14 @@ func deleteBackups(

 r, _, err := utils.GetAccountAndConnectWithOverrides(ctx, service, storage.ProviderS3, nil)
 if err != nil {
-return nil, clues.Wrap(err, "connecting to account").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "connecting to account")
 }

 defer r.Close(ctx)

 backups, err := r.BackupsByTag(ctx, store.Service(service))
 if err != nil {
-return nil, clues.Wrap(err, "listing backups").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "listing backups")
 }

 var (
@@ -51,11 +51,11 @@ func deleteBackups(
 for _, backup := range backups {
 if backup.StartAndEndTime.CompletedAt.Before(cutoff) {
 if err := r.DeleteBackups(ctx, true, backup.ID.String()); err != nil {
-return nil, clues.Wrap(
+return nil, clues.WrapWC(
+ctx,
 err,
 "deleting backup").
-With("backup_id", backup.ID).
-WithClues(ctx)
+With("backup_id", backup.ID)
 }

 deleted = append(deleted, backup.ID.String())
@@ -122,7 +122,7 @@ func pitrListBackups(

 backups, err := r.BackupsByTag(ctx, store.Service(pst))
 if err != nil {
-return clues.Wrap(err, "listing backups").WithClues(ctx)
+return clues.WrapWC(ctx, err, "listing backups")
 }

 bups := map[string]struct{}{}
@@ -135,9 +135,8 @@ func pitrListBackups(

 for _, backupID := range backupIDs {
 if _, ok := bups[backupID]; !ok {
-return clues.New("looking for backup").
-With("search_backup_id", backupID).
-WithClues(ctx)
+return clues.NewWC(ctx, "looking for backup").
+With("search_backup_id", backupID)
 }
 }

@@ -59,8 +59,7 @@ func Recovery(ctx context.Context, r any, namespace string) error {
 }
 }

-err = clues.Wrap(err, "panic recovery"+inFile).
-WithClues(ctx).
+err = clues.WrapWC(ctx, err, "panic recovery"+inFile).
 With("stacktrace", string(debug.Stack())).
 WithTrace(2)
 logger.CtxErr(ctx, err).Error(namespace + " panic")
@@ -128,7 +128,7 @@ func (rrh *resetRetryHandler) Read(p []byte) (int, error) {
 return read, io.EOF
 }

-return read, clues.Stack(err).WithClues(rrh.ctx).OrNil()
+return read, clues.StackWC(rrh.ctx, err).OrNil()
 }

 logger.Ctx(rrh.ctx).Infow(
@@ -192,8 +192,7 @@ func (rrh *resetRetryHandler) reconnect(maxRetries int) (int, error) {

 r, err = rrh.getter.Get(ctx, headers)
 if err != nil {
-err = clues.Wrap(err, "retrying connection").
-WithClues(ctx).
+err = clues.WrapWC(ctx, err, "retrying connection").
 With("attempt_num", attempts)

 continue
@@ -211,8 +210,7 @@ func (rrh *resetRetryHandler) reconnect(maxRetries int) (int, error) {
 if skip > 0 {
 _, err = io.CopyN(io.Discard, rrh.innerReader, skip)
 if err != nil {
-err = clues.Wrap(err, "seeking to correct offset").
-WithClues(ctx).
+err = clues.WrapWC(ctx, err, "seeking to correct offset").
 With("attempt_num", attempts)
 }
 }
@@ -259,10 +259,9 @@ func (i *lazyItemWithInfo) Info() (details.ItemInfo, error) {
 defer i.mu.Unlock()

 if i.delInFlight {
-return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx)
+return details.ItemInfo{}, clues.StackWC(i.ctx, ErrNotFound)
 } else if i.info == nil {
-return details.ItemInfo{}, clues.New("requesting ItemInfo before data retrieval").
-WithClues(i.ctx)
+return details.ItemInfo{}, clues.NewWC(i.ctx, "requesting ItemInfo before data retrieval")
 }

 return *i.info, nil
@@ -107,7 +107,7 @@ func NewBus(ctx context.Context, s storage.Storage, tenID string, co control.Opt
 })

 if err != nil {
-return Bus{}, clues.Wrap(err, "configuring event bus").WithClues(ctx)
+return Bus{}, clues.WrapWC(ctx, err, "configuring event bus")
 }
 }

@@ -134,7 +134,7 @@ func (b *baseFinder) getBackupModel(

 bup, err := b.bg.GetBackup(ctx, model.StableID(bID))
 if err != nil {
-return nil, clues.Stack(err).WithClues(ctx)
+return nil, clues.StackWC(ctx, err)
 }

 return bup, nil
@@ -106,12 +106,12 @@ func (w *conn) Initialize(

 cfg, err := w.storage.CommonConfig()
 if err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 rOpts := retention.NewOpts()
 if err := rOpts.Set(retentionOpts); err != nil {
-return clues.Wrap(err, "setting retention configuration").WithClues(ctx)
+return clues.WrapWC(ctx, err, "setting retention configuration")
 }

 blobCfg, _, err := rOpts.AsConfigs(ctx)
@@ -127,10 +127,10 @@ func (w *conn) Initialize(

 if err = repo.Initialize(ctx, bst, &kopiaOpts, cfg.CorsoPassphrase); err != nil {
 if errors.Is(err, repo.ErrAlreadyInitialized) {
-return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
+return clues.StackWC(ctx, ErrorRepoAlreadyExists, err)
 }

-return clues.Wrap(err, "initializing repo").WithClues(ctx)
+return clues.WrapWC(ctx, err, "initializing repo")
 }

 err = w.commonConnect(
@@ -146,7 +146,7 @@ func (w *conn) Initialize(
 }

 if err := w.setDefaultConfigValues(ctx); err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 // Calling with all parameters here will set extend object locks for
@@ -164,7 +164,7 @@ func (w *conn) Connect(ctx context.Context, opts repository.Options, repoNameHas

 cfg, err := w.storage.CommonConfig()
 if err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 return w.commonConnect(
@@ -210,11 +210,11 @@ func (w *conn) commonConnect(
 bst,
 password,
 kopiaOpts); err != nil {
-return clues.Wrap(err, "connecting to kopia repo").WithClues(ctx)
+return clues.WrapWC(ctx, err, "connecting to kopia repo")
 }

 if err := w.open(ctx, cfgFile, password); err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 return nil
@@ -231,7 +231,7 @@ func blobStoreByProvider(
 case storage.ProviderFilesystem:
 return filesystemStorage(ctx, opts, s)
 default:
-return nil, clues.New("storage provider details are required").WithClues(ctx)
+return nil, clues.NewWC(ctx, "storage provider details are required")
 }
 }

@@ -259,7 +259,7 @@ func (w *conn) close(ctx context.Context) error {
 w.Repository = nil

 if err != nil {
-return clues.Wrap(err, "closing repository connection").WithClues(ctx)
+return clues.WrapWC(ctx, err, "closing repository connection")
 }

 return nil
@@ -274,7 +274,7 @@ func (w *conn) open(ctx context.Context, configPath, password string) error {
 // TODO(ashmrtnz): issue #75: nil here should be storage.ConnectionOptions().
 rep, err := repo.Open(ctx, configPath, password, nil)
 if err != nil {
-return clues.Wrap(err, "opening repository connection").WithClues(ctx)
+return clues.WrapWC(ctx, err, "opening repository connection")
 }

 w.Repository = rep
@@ -332,7 +332,7 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {
 // compressor was given.
 comp := compression.Name(compressor)
 if err := checkCompressor(comp); err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 p, err := w.getGlobalPolicyOrEmpty(ctx)
@@ -342,7 +342,7 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {

 changed, err := updateCompressionOnPolicy(compressor, p)
 if err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 if !changed {
@@ -409,7 +409,7 @@ func (w *conn) getPolicyOrEmpty(ctx context.Context, si snapshot.SourceInfo) (*p
 return &policy.Policy{}, nil
 }

-return nil, clues.Wrap(err, "getting backup policy").With("source_info", si).WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "getting backup policy").With("source_info", si)
 }

 return p, nil
@@ -433,16 +433,16 @@ func (w *conn) writePolicy(
 ctx = clues.Add(ctx, "source_info", si)

 writeOpts := repo.WriteSessionOptions{Purpose: purpose}
-ctr := func(innerCtx context.Context, rw repo.RepositoryWriter) error {
+ctr := func(ictx context.Context, rw repo.RepositoryWriter) error {
 if err := policy.SetPolicy(ctx, rw, si, p); err != nil {
-return clues.Stack(err).WithClues(innerCtx)
+return clues.StackWC(ictx, err)
 }

 return nil
 }

 if err := repo.WriteSession(ctx, w.Repository, writeOpts, ctr); err != nil {
-return clues.Wrap(err, "updating policy").WithClues(ctx)
+return clues.WrapWC(ctx, err, "updating policy")
 }

 return nil
@@ -470,12 +470,12 @@ func (w *conn) setRetentionParameters(
 // it acts like we passed in only the duration and returns an error about
 // having to set both. Return a clearer error here instead.
 if ptr.Val(rrOpts.Mode) == repository.NoRetention && ptr.Val(rrOpts.Duration) != 0 {
-return clues.New("duration must be 0 if rrOpts is disabled").WithClues(ctx)
+return clues.NewWC(ctx, "duration must be 0 if rrOpts is disabled")
 }

 dr, ok := w.Repository.(repo.DirectRepository)
 if !ok {
-return clues.New("getting handle to repo").WithClues(ctx)
+return clues.NewWC(ctx, "getting handle to repo")
 }

 blobCfg, params, err := getRetentionConfigs(ctx, dr)
@@ -485,7 +485,7 @@ func (w *conn) setRetentionParameters(

 opts := retention.OptsFromConfigs(*blobCfg, *params)
 if err := opts.Set(rrOpts); err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 return clues.Stack(persistRetentionConfigs(ctx, dr, opts)).OrNil()
@@ -497,12 +497,12 @@ func getRetentionConfigs(
 ) (*format.BlobStorageConfiguration, *maintenance.Params, error) {
 blobCfg, err := dr.FormatManager().BlobCfgBlob()
 if err != nil {
-return nil, nil, clues.Wrap(err, "getting storage config").WithClues(ctx)
+return nil, nil, clues.WrapWC(ctx, err, "getting storage config")
 }

 params, err := maintenance.GetParams(ctx, dr)
 if err != nil {
-return nil, nil, clues.Wrap(err, "getting maintenance config").WithClues(ctx)
+return nil, nil, clues.WrapWC(ctx, err, "getting maintenance config")
 }

 return &blobCfg, params, nil
@@ -525,19 +525,21 @@ func persistRetentionConfigs(

 mp, err := dr.FormatManager().GetMutableParameters()
 if err != nil {
-return clues.Wrap(err, "getting mutable parameters").WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting mutable parameters")
 }

 requiredFeatures, err := dr.FormatManager().RequiredFeatures()
 if err != nil {
-return clues.Wrap(err, "getting required features").WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting required features")
 }

 // Must be the case that only blob changed.
 if !opts.ParamsChanged() {
-return clues.Wrap(
+return clues.WrapWC(
+ctx,
 dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
-"persisting storage config").WithClues(ctx).OrNil()
+"persisting storage config").
+OrNil()
 }

 // Both blob and maintenance changed. A DirectWriteSession is required to
@@ -552,20 +554,21 @@ func persistRetentionConfigs(
 // Set the maintenance config first as we can bail out of the write
 // session later.
 if err := maintenance.SetParams(ctx, dw, &params); err != nil {
-return clues.Wrap(err, "maintenance config").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "maintenance config")
 }

 if !opts.BlobChanged() {
 return nil
 }

-return clues.Wrap(
+return clues.WrapWC(
+ctx,
 dr.FormatManager().SetParameters(ctx, mp, blobCfg, requiredFeatures),
-"storage config").WithClues(ctx).OrNil()
+"storage config").
+OrNil()
 })

-return clues.Wrap(err, "persisting config changes").WithClues(ctx).OrNil()
+return clues.WrapWC(ctx, err, "persisting config changes").OrNil()
 }

 func (w *conn) LoadSnapshot(
@@ -574,7 +577,7 @@ func (w *conn) LoadSnapshot(
 ) (*snapshot.Manifest, error) {
 man, err := snapshot.LoadSnapshot(ctx, w.Repository, id)
 if err != nil {
-return nil, clues.Stack(err).WithClues(ctx)
+return nil, clues.StackWC(ctx, err)
 }

 return man, nil
@@ -44,8 +44,7 @@ func (kdc *kopiaDataCollection) Items(
 for _, item := range kdc.items {
 s, err := kdc.FetchItemByName(ctx, item)
 if err != nil {
-el.AddRecoverable(ctx, clues.Wrap(err, "fetching item").
-WithClues(ctx).
+el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "fetching item").
 Label(fault.LabelForceNoBackupCreation))

 continue
@@ -87,7 +86,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
 }

 if len(name) == 0 {
-return nil, clues.Wrap(ErrNoRestorePath, "unknown item").WithClues(ctx)
+return nil, clues.WrapWC(ctx, ErrNoRestorePath, "unknown item")
 }

 e, err := kdc.dir.Child(ctx, encodeAsPath(name))
@@ -96,12 +95,12 @@ func (kdc kopiaDataCollection) FetchItemByName(
 err = clues.Stack(data.ErrNotFound, err)
 }

-return nil, clues.Wrap(err, "getting item").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "getting item")
 }

 f, ok := e.(fs.File)
 if !ok {
-return nil, clues.New("object is not a file").WithClues(ctx)
+return nil, clues.NewWC(ctx, "object is not a file")
 }

 size := f.Size() - int64(readers.VersionFormatSize)
@@ -117,19 +116,18 @@ func (kdc kopiaDataCollection) FetchItemByName(

 r, err := f.Open(ctx)
 if err != nil {
-return nil, clues.Wrap(err, "opening file").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "opening file")
 }

 // TODO(ashmrtn): Remove this when individual services implement checks for
 // version and deleted items.
 rr, err := readers.NewVersionedRestoreReader(r)
 if err != nil {
-return nil, clues.Stack(err).WithClues(ctx)
+return nil, clues.StackWC(ctx, err)
 }

 if rr.Format().Version != kdc.expectedVersion {
-return nil, clues.New("unexpected data format").
-WithClues(ctx).
+return nil, clues.NewWC(ctx, "unexpected data format").
 With(
 "read_version", rr.Format().Version,
 "expected_version", kdc.expectedVersion)
@@ -138,8 +136,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
 // This is a conservative check, but we shouldn't be seeing items that were
 // deleted in flight during restores because there's no way to select them.
 if rr.Format().DelInFlight {
-return nil, clues.New("selected item marked as deleted in flight").
-WithClues(ctx)
+return nil, clues.NewWC(ctx, "selected item marked as deleted in flight")
 }

 return &kopiaDataStream{
@@ -18,7 +18,7 @@ func filesystemStorage(
 ) (blob.Storage, error) {
 fsCfg, err := s.ToFilesystemConfig()
 if err != nil {
-return nil, clues.Stack(err).WithClues(ctx)
+return nil, clues.StackWC(ctx, err)
 }

 opts := filesystem.Options{
@@ -27,7 +27,7 @@ func filesystemStorage(

 store, err := filesystem.New(ctx, &opts, true)
 if err != nil {
-return nil, clues.Stack(err).WithClues(ctx)
+return nil, clues.StackWC(ctx, err)
 }

 return store, nil
@@ -114,8 +114,7 @@ func (mc *mergeCollection) FetchItemByName(
 if err == nil {
 return s, nil
 } else if err != nil && !errors.Is(err, data.ErrNotFound) {
-return nil, clues.Wrap(err, "fetching from merged collection").
-WithClues(ictx)
+return nil, clues.WrapWC(ctx, err, "fetching from merged collection")
 }
 }

@@ -125,7 +125,7 @@ func putInner(
 create bool,
 ) error {
 if !s.Valid() {
-return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return clues.StackWC(ctx, errUnrecognizedSchema)
 }

 base := m.Base()
@@ -136,13 +136,13 @@ func putInner(
 tmpTags, err := tagsForModelWithID(s, base.ID, base.ModelVersion, base.Tags)
 if err != nil {
 // Will be wrapped at a higher layer.
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 id, err := w.PutManifest(ctx, tmpTags, m)
 if err != nil {
 // Will be wrapped at a higher layer.
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 base.ModelStoreID = id
@@ -167,16 +167,16 @@ func (ms *ModelStore) Put(
 ctx,
 ms.c,
 repo.WriteSessionOptions{Purpose: "ModelStorePut"},
-func(innerCtx context.Context, w repo.RepositoryWriter) error {
-err := putInner(innerCtx, w, s, m, true)
+func(ictx context.Context, w repo.RepositoryWriter) error {
+err := putInner(ictx, w, s, m, true)
 if err != nil {
-return clues.Stack(err).WithClues(innerCtx)
+return clues.StackWC(ictx, err)
 }

 return nil
 })
 if err != nil {
-return clues.Wrap(err, "putting model").WithClues(ctx)
+return clues.WrapWC(ctx, err, "putting model")
 }

 return nil
@@ -237,21 +237,21 @@ func (ms *ModelStore) GetIDsForType(
 tags map[string]string,
 ) ([]*model.BaseModel, error) {
 if !s.Valid() {
-return nil, clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return nil, clues.StackWC(ctx, errUnrecognizedSchema)
 }

 if _, ok := tags[stableIDKey]; ok {
-return nil, clues.Stack(errBadTagKey).WithClues(ctx)
+return nil, clues.StackWC(ctx, errBadTagKey)
 }

 tmpTags, err := tagsForModel(s, tags)
 if err != nil {
-return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "getting model metadata")
 }

 metadata, err := ms.c.FindManifests(ctx, tmpTags)
 if err != nil {
-return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "getting model metadata")
 }

 res := make([]*model.BaseModel, 0, len(metadata))
@@ -259,7 +259,7 @@ func (ms *ModelStore) GetIDsForType(
 for _, m := range metadata {
 bm, err := ms.baseModelFromMetadata(m)
 if err != nil {
-return nil, clues.Wrap(err, "parsing model metadata").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "parsing model metadata")
 }

 res = append(res, bm)
@@ -277,30 +277,30 @@ func (ms *ModelStore) getModelStoreID(
 id model.StableID,
 ) (manifest.ID, error) {
 if !s.Valid() {
-return "", clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return "", clues.StackWC(ctx, errUnrecognizedSchema)
 }

 if len(id) == 0 {
-return "", clues.Stack(errNoStableID).WithClues(ctx)
+return "", clues.StackWC(ctx, errNoStableID)
 }

 tags := map[string]string{stableIDKey: string(id)}

 metadata, err := ms.c.FindManifests(ctx, tags)
 if err != nil {
-return "", clues.Wrap(err, "getting ModelStoreID").WithClues(ctx)
+return "", clues.WrapWC(ctx, err, "getting ModelStoreID")
 }

 if len(metadata) == 0 {
-return "", clues.Wrap(data.ErrNotFound, "getting ModelStoreID").WithClues(ctx)
+return "", clues.WrapWC(ctx, data.ErrNotFound, "getting ModelStoreID")
 }

 if len(metadata) != 1 {
-return "", clues.New("multiple models with same StableID").WithClues(ctx)
+return "", clues.NewWC(ctx, "multiple models with same StableID")
 }

 if metadata[0].Labels[manifest.TypeLabelKey] != s.String() {
-return "", clues.Stack(errModelTypeMismatch).WithClues(ctx)
+return "", clues.StackWC(ctx, errModelTypeMismatch)
 }

 return metadata[0].ID, nil
@@ -316,7 +316,7 @@ func (ms *ModelStore) Get(
 m model.Model,
 ) error {
 if !s.Valid() {
-return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return clues.StackWC(ctx, errUnrecognizedSchema)
 }

 modelID, err := ms.getModelStoreID(ctx, s, id)
@@ -337,11 +337,11 @@ func (ms *ModelStore) GetWithModelStoreID(
 m model.Model,
 ) error {
 if !s.Valid() {
-return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return clues.StackWC(ctx, errUnrecognizedSchema)
 }

 if len(id) == 0 {
-return clues.Stack(errNoModelStoreID).WithClues(ctx)
+return clues.StackWC(ctx, errNoModelStoreID)
 }

 metadata, err := ms.c.GetManifest(ctx, id, m)
@@ -350,18 +350,17 @@ func (ms *ModelStore) GetWithModelStoreID(
 err = data.ErrNotFound
 }

-return clues.Wrap(err, "getting model data").WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting model data")
 }

 mdlbl := metadata.Labels[manifest.TypeLabelKey]
 if mdlbl != s.String() {
-return clues.Stack(errModelTypeMismatch).
-WithClues(ctx).
+return clues.StackWC(ctx, errModelTypeMismatch).
 With("expected_label", s, "got_label", mdlbl)
 }

 if err := ms.populateBaseModelFromMetadata(m.Base(), metadata); err != nil {
-return clues.Wrap(err, "getting model by ID").WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting model by ID")
 }

 return nil
@@ -378,30 +377,28 @@ func (ms *ModelStore) checkPrevModelVersion(
 b *model.BaseModel,
 ) error {
 if !s.Valid() {
-return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return clues.StackWC(ctx, errUnrecognizedSchema)
 }

 id, err := ms.getModelStoreID(ctx, s, b.ID)
 if err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 // We actually got something back during our lookup.
 meta, err := ms.c.GetManifest(ctx, id, nil)
 if err != nil {
-return clues.Wrap(err, "getting previous model version").WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting previous model version")
 }

 if meta.ID != b.ModelStoreID {
-return clues.New("updated model has different ModelStoreID").
-WithClues(ctx).
+return clues.NewWC(ctx, "updated model has different ModelStoreID").
 With("expected_id", meta.ID, "model_store_id", b.ModelStoreID)
 }

 mdlbl := meta.Labels[manifest.TypeLabelKey]
 if mdlbl != s.String() {
-return clues.New("updated model has different model type").
-WithClues(ctx).
+return clues.NewWC(ctx, "updated model has different model type").
 With("expected_label", s, "got_label", mdlbl)
 }

@@ -420,12 +417,12 @@ func (ms *ModelStore) Update(
 m model.Model,
 ) error {
 if !s.Valid() {
-return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return clues.StackWC(ctx, errUnrecognizedSchema)
 }

 base := m.Base()
 if len(base.ModelStoreID) == 0 {
-return clues.Stack(errNoModelStoreID).WithClues(ctx)
+return clues.StackWC(ctx, errNoModelStoreID)
 }

 base.ModelVersion = ms.modelVersion
@@ -468,13 +465,13 @@ func (ms *ModelStore) Update(
 // collected the next time kopia maintenance is run.
 innerErr = w.DeleteManifest(innerCtx, oldID)
 if innerErr != nil {
-return clues.Stack(innerErr).WithClues(ctx)
+return clues.StackWC(ctx, innerErr)
 }

 return nil
 })
 if err != nil {
-return clues.Wrap(err, "updating model").WithClues(ctx)
+return clues.WrapWC(ctx, err, "updating model")
 }

 return nil
@@ -485,7 +482,7 @@ func (ms *ModelStore) Update(
 // have the same StableID.
 func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.StableID) error {
 if !s.Valid() {
-return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
+return clues.StackWC(ctx, errUnrecognizedSchema)
 }

 latest, err := ms.getModelStoreID(ctx, s, id)
@@ -511,14 +508,14 @@ func (ms *ModelStore) DeleteWithModelStoreIDs(
 ids ...manifest.ID,
 ) error {
 opts := repo.WriteSessionOptions{Purpose: "ModelStoreDelete"}
-cb := func(innerCtx context.Context, w repo.RepositoryWriter) error {
+cb := func(ictx context.Context, w repo.RepositoryWriter) error {
 for _, id := range ids {
 if len(id) == 0 {
-return clues.Stack(errNoModelStoreID).WithClues(ctx)
+return clues.StackWC(ictx, errNoModelStoreID)
 }

-if err := w.DeleteManifest(innerCtx, id); err != nil {
-return clues.Stack(err).WithClues(innerCtx).With("model_store_id", id)
+if err := w.DeleteManifest(ictx, id); err != nil {
+return clues.StackWC(ictx, err).With("model_store_id", id)
 }
 }

@@ -526,7 +523,7 @@ func (ms *ModelStore) DeleteWithModelStoreIDs(
 }

 if err := repo.WriteSession(ctx, ms.c, opts, cb); err != nil {
-return clues.Wrap(err, "deleting model").WithClues(ctx)
+return clues.WrapWC(ctx, err, "deleting model")
 }

 return nil
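The ModelStore hunks above also illustrate the `ictx` correction mentioned in the PR description: a write-session callback receives its own context, so errors raised inside it should pack that inner context rather than the enclosing `ctx`. A sketch of the convention, mirroring the `DeleteWithModelStoreIDs` change above (surrounding wiring omitted):

```go
// The callback receives a session-scoped ictx; errors built inside it
// pack ictx, not the ctx captured from the enclosing scope.
cb := func(ictx context.Context, w repo.RepositoryWriter) error {
	for _, id := range ids {
		if len(id) == 0 {
			return clues.StackWC(ictx, errNoModelStoreID)
		}

		if err := w.DeleteManifest(ictx, id); err != nil {
			return clues.StackWC(ictx, err).With("model_store_id", id)
		}
	}

	return nil
}
```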
@@ -40,9 +40,10 @@ func (r *Opts) AsConfigs(
 // Check the new config is valid.
 if r.blobCfg.IsRetentionEnabled() {
 if err := maintenance.CheckExtendRetention(ctx, r.blobCfg, &r.params); err != nil {
-return format.BlobStorageConfiguration{}, maintenance.Params{}, clues.Wrap(
+return format.BlobStorageConfiguration{}, maintenance.Params{}, clues.WrapWC(
+ctx,
 err,
-"invalid retention config").WithClues(ctx)
+"invalid retention config")
 }
 }

@@ -22,7 +22,7 @@ func s3BlobStorage(
 ) (blob.Storage, error) {
 cfg, err := s.ToS3Config()
 if err != nil {
-return nil, clues.Stack(err).WithClues(ctx)
+return nil, clues.StackWC(ctx, err)
 }

 endpoint := defaultS3Endpoint
@@ -49,7 +49,7 @@ func s3BlobStorage(

 store, err := s3.New(ctx, &opts, false)
 if err != nil {
-return nil, clues.Stack(err).WithClues(ctx)
+return nil, clues.StackWC(ctx, err)
 }

 return store, nil
@@ -115,8 +115,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 // never had to materialize their details in-memory.
 if d.infoer == nil || d.cached {
 if d.prevPath == nil {
-cp.errs.AddRecoverable(ctx, clues.New("finished file sourced from previous backup with no previous path").
-WithClues(ctx).
+cp.errs.AddRecoverable(ctx, clues.NewWC(ctx, "finished file sourced from previous backup with no previous path").
 Label(fault.LabelForceNoBackupCreation))

 return
@@ -131,8 +130,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 d.repoPath,
 d.locationPath)
 if err != nil {
-cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to merge list").
-WithClues(ctx).
+cp.errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "adding finished file to merge list").
 Label(fault.LabelForceNoBackupCreation))
 }

@@ -145,27 +143,23 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 // adding it to details since there's no data for it.
 return
 } else if err != nil {
-cp.errs.AddRecoverable(ctx, clues.Wrap(err, "getting ItemInfo").
-WithClues(ctx).
+cp.errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "getting ItemInfo").
 Label(fault.LabelForceNoBackupCreation))

 return
 } else if !ptr.Val(d.modTime).Equal(info.Modified()) {
-cp.errs.AddRecoverable(ctx, clues.New("item modTime mismatch").
-WithClues(ctx).
+cp.errs.AddRecoverable(ctx, clues.NewWC(ctx, "item modTime mismatch").
 Label(fault.LabelForceNoBackupCreation))

 return
 } else if info.Modified().IsZero() {
-cp.errs.AddRecoverable(ctx, clues.New("zero-valued mod time").
-WithClues(ctx).
+cp.errs.AddRecoverable(ctx, clues.NewWC(ctx, "zero-valued mod time").
 Label(fault.LabelForceNoBackupCreation))
 }

 err = cp.deets.Add(d.repoPath, d.locationPath, info)
 if err != nil {
-cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to details").
-WithClues(ctx).
+cp.errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "adding finished file to details").
 Label(fault.LabelForceNoBackupCreation))

 return
@@ -275,7 +269,7 @@ func collectionEntries(
 for {
 select {
 case <-ctx.Done():
-return seen, clues.Stack(ctx.Err()).WithClues(ctx)
+return seen, clues.StackWC(ctx, ctx.Err())

 case e, ok := <-items:
 if !ok {
@@ -357,8 +351,7 @@ func collectionEntries(
 if err != nil {
 // Kopia's uploader swallows errors in most cases, so if we see
 // something here it's probably a big issue and we should return.
-return seen, clues.Wrap(err, "executing callback").
-WithClues(ctx).
+return seen, clues.WrapWC(ctx, err, "executing callback").
 With("item_path", itemPath)
 }
 }
@@ -397,13 +390,12 @@ func streamBaseEntries(
 ctx,
 func(innerCtx context.Context, entry fs.Entry) error {
 if err := innerCtx.Err(); err != nil {
-return clues.Stack(err).WithClues(ctx)
+return clues.StackWC(ctx, err)
 }

 entName, err := decodeElement(entry.Name())
 if err != nil {
-return clues.Wrap(err, "decoding entry name").
-WithClues(ctx).
+return clues.WrapWC(ctx, err, "decoding entry name").
 With("entry_name", entry.Name())
 }

@@ -421,14 +413,12 @@ func streamBaseEntries(
 // LocationPath information associated with the directory.
 newP, err := params.currentPath.Append(false, entName)
 if err != nil {
-return clues.Wrap(err, "getting current directory path").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting current directory path")
 }

 oldP, err := params.prevPath.Append(false, entName)
 if err != nil {
-return clues.Wrap(err, "getting previous directory path").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting previous directory path")
 }

 e := virtualfs.NewStreamingDirectory(
@@ -445,8 +435,7 @@ func streamBaseEntries(
 globalExcludeSet,
 progress))

-return clues.Wrap(ctr(ctx, e), "executing callback on subdirectory").
-WithClues(ctx).
+return clues.WrapWC(ctx, ctr(ctx, e), "executing callback on subdirectory").
 With("directory_path", newP).
 OrNil()
 }
@@ -467,8 +456,7 @@ func streamBaseEntries(
 // For now assuming that item IDs don't need escaping.
 itemPath, err := params.currentPath.AppendItem(entName)
 if err != nil {
-return clues.Wrap(err, "getting full item path for base entry").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting full item path for base entry")
 }

 // We need the previous path so we can find this item in the base snapshot's
@@ -477,8 +465,7 @@ func streamBaseEntries(
 // to look for.
 prevItemPath, err := params.prevPath.AppendItem(entName)
 if err != nil {
-return clues.Wrap(err, "getting previous full item path for base entry").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting previous full item path for base entry")
 }

 // Meta files aren't in backup details since it's the set of items the
@@ -502,16 +489,14 @@ func streamBaseEntries(
 }

 if err := ctr(ctx, entry); err != nil {
-return clues.Wrap(err, "executing callback on item").
-WithClues(ctx).
+return clues.WrapWC(ctx, err, "executing callback on item").
 With("item_path", itemPath)
 }

 return nil
 })
 if err != nil {
-return clues.Wrap(err, "traversing items in base snapshot directory").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "traversing items in base snapshot directory")
 }

 return nil
@@ -534,8 +519,7 @@ func getStreamItemFunc(
 // Return static entries in this directory first.
 for _, d := range staticEnts {
 if err := ctr(ctx, d); err != nil {
-return clues.Wrap(err, "executing callback on static directory").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "executing callback on static directory")
 }
 }

@@ -763,15 +747,13 @@ func inflateCollectionTree(
 switch s.State() {
 case data.DeletedState:
 if s.PreviousPath() == nil {
-return nil, nil, clues.New("nil previous path on deleted collection").
-WithClues(ictx)
+return nil, nil, clues.NewWC(ictx, "nil previous path on deleted collection")
 }

 changedPaths = append(changedPaths, s.PreviousPath())

 if p, ok := updatedPaths[s.PreviousPath().String()]; ok {
-err := clues.New("multiple previous state changes").
-WithClues(ictx).
+err := clues.NewWC(ictx, "multiple previous state changes").
 With("updated_path", p, "current_state", data.DeletedState)
 logger.CtxErr(ictx, err).Error("previous path state collision")

@@ -788,8 +770,7 @@ func inflateCollectionTree(
 changedPaths = append(changedPaths, s.PreviousPath())

 if p, ok := updatedPaths[s.PreviousPath().String()]; ok {
-err := clues.New("multiple previous state changes").
-WithClues(ictx).
+err := clues.NewWC(ictx, "multiple previous state changes").
 With("updated_path", p, "current_state", data.MovedState)
 logger.CtxErr(ictx, err).Error("previous path state collision")

@@ -809,15 +790,13 @@ func inflateCollectionTree(
 // changed via one of the ancestor folders being moved. This catches the
 // ancestor folder move.
 if err := addMergeLocation(s, toMerge); err != nil {
-return nil, nil, clues.Wrap(err, "adding merge location").
-WithClues(ictx)
+return nil, nil, clues.WrapWC(ictx, err, "adding merge location")
 }

 case data.NotMovedState:
 p := s.PreviousPath().String()
 if p, ok := updatedPaths[p]; ok {
-err := clues.New("multiple previous state changes").
-WithClues(ictx).
+err := clues.NewWC(ictx, "multiple previous state changes").
 With("updated_path", p, "current_state", data.NotMovedState)
 logger.CtxErr(ictx, err).Error("previous path state collision")

@@ -833,19 +812,18 @@ func inflateCollectionTree(
 }

 if s.FullPath() == nil || len(s.FullPath().Elements()) == 0 {
-return nil, nil, clues.New("no identifier for collection").WithClues(ictx)
+return nil, nil, clues.NewWC(ictx, "no identifier for collection")
 }

 node := getTreeNode(roots, s.FullPath().Elements())
 if node == nil {
-return nil, nil, clues.New("getting tree node").WithClues(ictx)
+return nil, nil, clues.NewWC(ictx, "getting tree node")
 }

 // Make sure there's only a single collection adding items for any given
 // path in the new hierarchy.
 if node.collection != nil {
-return nil, nil, clues.New("multiple instances of collection").
-WithClues(ictx)
+return nil, nil, clues.NewWC(ictx, "multiple instances of collection")
 }

 node.collection = s
@@ -863,8 +841,7 @@ func inflateCollectionTree(
 }

 if node.collection != nil && node.collection.State() == data.NotMovedState {
-err := clues.New("conflicting states for collection").
-WithClues(ctx)
+err := clues.NewWC(ctx, "conflicting states for collection")
 logger.CtxErr(ctx, err).Error("adding node to tree")

 if firstErr == nil {
@@ -947,7 +924,7 @@ func traverseBaseDir(
 "expected_parent_dir_path", expectedDirPath)

 if depth >= maxInflateTraversalDepth {
-return clues.New("base snapshot tree too tall").WithClues(ctx)
+return clues.NewWC(ctx, "base snapshot tree too tall")
 }

 // Wrapper base64 encodes all file and folder names to avoid issues with
@@ -955,8 +932,7 @@ func traverseBaseDir(
 // from kopia we need to do the decoding here.
 dirName, err := decodeElement(dir.Name())
 if err != nil {
-return clues.Wrap(err, "decoding base directory name").
-WithClues(ctx).
+return clues.WrapWC(ctx, err, "decoding base directory name").
 With("dir_name", clues.Hide(dir.Name()))
 }

@@ -1029,7 +1005,7 @@ func traverseBaseDir(
 stats)
 })
 if err != nil {
-return clues.Wrap(err, "traversing base directory").WithClues(ctx)
+return clues.WrapWC(ctx, err, "traversing base directory")
 }
 } else {
 stats.Inc(statPruned)
@@ -1049,7 +1025,7 @@ func traverseBaseDir(
 // in the if-block though as that is an optimization.
 node := getTreeNode(roots, currentPath.Elements())
 if node == nil {
-return clues.New("getting tree node").WithClues(ctx)
+return clues.NewWC(ctx, "getting tree node")
 }

 // Now that we have the node we need to check if there is a collection
@@ -1075,12 +1051,12 @@ func traverseBaseDir(

 curP, err := path.PrefixOrPathFromDataLayerPath(currentPath.String(), false)
 if err != nil {
-return clues.New("converting current path to path.Path").WithClues(ctx)
+return clues.NewWC(ctx, "converting current path to path.Path")
 }

 oldP, err := path.PrefixOrPathFromDataLayerPath(oldDirPath.String(), false)
 if err != nil {
-return clues.New("converting old path to path.Path").WithClues(ctx)
+return clues.NewWC(ctx, "converting old path to path.Path")
 }

 node.baseDir = dir
@@ -1159,12 +1135,12 @@ func inflateBaseTree(

 root, err := loader.SnapshotRoot(base.ItemDataSnapshot)
 if err != nil {
-return clues.Wrap(err, "getting snapshot root directory").WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting snapshot root directory")
 }

 dir, ok := root.(fs.Directory)
 if !ok {
-return clues.New("snapshot root is not a directory").WithClues(ctx)
+return clues.NewWC(ctx, "snapshot root is not a directory")
 }

 // For each subtree corresponding to the tuple
@@ -1178,7 +1154,7 @@ func inflateBaseTree(

 subtreePath, err := r.SubtreePath()
 if err != nil {
-return clues.Wrap(err, "building subtree path").WithClues(ictx)
+return clues.WrapWC(ictx, err, "building subtree path")
 }

 // We're starting from the root directory so don't need it in the path.
@@ -1191,12 +1167,12 @@ func inflateBaseTree(
 continue
 }

-return clues.Wrap(err, "getting subtree root").WithClues(ictx)
+return clues.WrapWC(ictx, err, "getting subtree root")
 }

 subtreeDir, ok := ent.(fs.Directory)
 if !ok {
-return clues.Wrap(err, "subtree root is not directory").WithClues(ictx)
+return clues.WrapWC(ictx, err, "subtree root is not directory")
 }

 // This ensures that a migration on the directory prefix can complete.
@@ -1219,7 +1195,7 @@ func inflateBaseTree(
 subtreeDir,
 roots,
 stats); err != nil {
-return clues.Wrap(err, "traversing base snapshot").WithClues(ictx)
+return clues.WrapWC(ictx, err, "traversing base snapshot")
 }

 logger.Ctx(ctx).Infow(
@@ -1278,7 +1254,7 @@ func inflateDirTree(
 }

 if len(roots) > 1 {
-return nil, clues.New("multiple root directories").WithClues(ctx)
+return nil, clues.NewWC(ctx, "multiple root directories")
 }

 var res fs.Directory
@@ -132,7 +132,7 @@ func (w *Wrapper) Close(ctx context.Context) error {
 w.c = nil

 if err != nil {
-return clues.Wrap(err, "closing Wrapper").WithClues(ctx)
+return clues.WrapWC(ctx, err, "closing Wrapper")
 }

 return nil
@@ -156,7 +156,7 @@ func (w Wrapper) ConsumeBackupCollections(
 errs *fault.Bus,
 ) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
 if w.c == nil {
-return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
+return nil, nil, nil, clues.StackWC(ctx, errNotConnected)
 }

 ctx, end := diagnostics.Span(ctx, "kopia:consumeBackupCollections")
@@ -304,7 +304,7 @@ func (w Wrapper) makeSnapshotWithRoot(

 policyTree, err := policy.TreeForSourceWithOverride(innerCtx, w.c, si, errPolicy)
 if err != nil {
-err = clues.Wrap(err, "get policy tree").WithClues(ctx)
+err = clues.WrapWC(ctx, err, "get policy tree")
 logger.CtxErr(innerCtx, err).Error("building kopia backup")

 return err
@@ -318,7 +318,7 @@ func (w Wrapper) makeSnapshotWithRoot(

 man, err = u.Upload(innerCtx, root, policyTree, si, prevSnaps...)
 if err != nil {
-err = clues.Wrap(err, "uploading data").WithClues(ctx)
+err = clues.WrapWC(ctx, err, "uploading data")
 logger.CtxErr(innerCtx, err).Error("uploading kopia backup")

 return err
@@ -327,7 +327,7 @@ func (w Wrapper) makeSnapshotWithRoot(
 man.Tags = tags

 if _, err := snapshot.SaveSnapshot(innerCtx, rw, man); err != nil {
-err = clues.Wrap(err, "saving snapshot").WithClues(ctx)
+err = clues.WrapWC(ctx, err, "saving snapshot")
 logger.CtxErr(innerCtx, err).Error("persisting kopia backup snapshot")

 return err
@@ -338,7 +338,7 @@ func (w Wrapper) makeSnapshotWithRoot(
 // Telling kopia to always flush may hide other errors if it fails while
 // flushing the write session (hence logging above).
 if err != nil {
-return nil, clues.Wrap(err, "kopia backup").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "kopia backup")
 }

 res := manifestToStats(man, progress, bc)
@@ -352,12 +352,12 @@ func (w Wrapper) getSnapshotRoot(
 ) (fs.Entry, error) {
 man, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID))
 if err != nil {
-return nil, clues.Wrap(err, "getting snapshot handle").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "getting snapshot handle")
 }

 rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, man)
 if err != nil {
-return nil, clues.Wrap(err, "getting root directory").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "getting root directory")
 }

 return rootDirEntry, nil
@@ -373,7 +373,7 @@ func getDir(
 snapshotRoot fs.Entry,
 ) (fs.Directory, error) {
 if dirPath == nil {
-return nil, clues.Wrap(ErrNoRestorePath, "getting directory").WithClues(ctx)
+return nil, clues.WrapWC(ctx, ErrNoRestorePath, "getting directory")
 }

 toGet := dirPath.PopFront()
@@ -387,15 +387,15 @@ func getDir(
 encodeElements(toGet.Elements()...))
 if err != nil {
 if isErrEntryNotFound(err) {
-err = clues.Stack(data.ErrNotFound, err).WithClues(ctx)
+err = clues.StackWC(ctx, data.ErrNotFound, err)
 }

-return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "getting nested object handle")
 }

 f, ok := e.(fs.Directory)
 if !ok {
-return nil, clues.New("requested object is not a directory").WithClues(ctx)
+return nil, clues.NewWC(ctx, "requested object is not a directory")
 }

 return f, nil
@@ -452,8 +452,7 @@ func loadDirsAndItems(

 dir, err := getDir(ictx, dirItems.dir, snapshotRoot)
 if err != nil {
-el.AddRecoverable(ctx, clues.Wrap(err, "loading storage directory").
-WithClues(ictx).
+el.AddRecoverable(ctx, clues.WrapWC(ictx, err, "loading storage directory").
 Label(fault.LabelForceNoBackupCreation))

 continue
@@ -468,8 +467,7 @@ func loadDirsAndItems(
 }

 if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {
-el.AddRecoverable(ctx, clues.Wrap(err, "adding collection to merge collection").
-WithClues(ctx).
+el.AddRecoverable(ctx, clues.WrapWC(ictx, err, "adding collection to merge collection").
 Label(fault.LabelForceNoBackupCreation))

 continue
@@ -498,14 +496,14 @@ func (w Wrapper) ProduceRestoreCollections(
 defer end()

 if len(paths) == 0 {
-return nil, clues.Stack(ErrNoRestorePath).WithClues(ctx)
+return nil, clues.StackWC(ctx, ErrNoRestorePath)
 }

 // Used later on, but less confusing to follow error propagation if we just
 // load it here.
 snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID)
 if err != nil {
-return nil, clues.Wrap(err, "loading snapshot root").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "loading snapshot root")
 }

 var (
@@ -530,8 +528,7 @@ func (w Wrapper) ProduceRestoreCollections(

 parentStoragePath, err := itemPaths.StoragePath.Dir()
 if err != nil {
-el.AddRecoverable(ictx, clues.Wrap(err, "getting storage directory path").
-WithClues(ictx).
+el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "getting storage directory path").
 Label(fault.LabelForceNoBackupCreation))

 continue
@@ -570,7 +567,7 @@ func (w Wrapper) ProduceRestoreCollections(
 // then load the items from the directory.
 res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs)
 if err != nil {
-return nil, clues.Wrap(err, "loading items").WithClues(ctx)
+return nil, clues.WrapWC(ctx, err, "loading items")
 }

 return res, el.Failure()
@@ -598,12 +595,12 @@ func (w Wrapper) RepoMaintenance(
 ) error {
 kopiaSafety, err := translateSafety(opts.Safety)
 if err != nil {
-return clues.Wrap(err, "identifying safety level").WithClues(ctx)
+return clues.WrapWC(ctx, err, "identifying safety level")
 }

 mode, err := translateMode(opts.Type)
 if err != nil {
-return clues.Wrap(err, "identifying maintenance mode").WithClues(ctx)
+return clues.WrapWC(ctx, err, "identifying maintenance mode")
 }

 currentOwner := w.c.ClientOptions().UsernameAtHost()
@@ -633,7 +630,7 @@ func (w Wrapper) RepoMaintenance(

 dr, ok := w.c.Repository.(repo.DirectRepository)
 if !ok {
-return clues.New("unable to get valid handle to repo").WithClues(ctx)
+return clues.NewWC(ctx, "unable to get valid handle to repo")
 }

 // Below write session options pulled from kopia's CLI code that runs
@@ -647,7 +644,7 @@ func (w Wrapper) RepoMaintenance(
 func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
 params, err := maintenance.GetParams(ctx, w.c)
 if err != nil {
-return clues.Wrap(err, "getting maintenance user@host").WithClues(ctx)
+return clues.WrapWC(ctx, err, "getting maintenance user@host")
 }

 // Need to do some fixup here as the user/host may not have been set.
@@ -658,8 +655,7 @@ func (w Wrapper) RepoMaintenance(
 clues.Hide(currentOwner))

 if err := w.setMaintenanceParams(ctx, dw, params, currentOwner); err != nil {
-return clues.Wrap(err, "updating maintenance parameters").
-WithClues(ctx)
+return clues.WrapWC(ctx, err, "updating maintenance parameters")
 }
 }

@@ -669,7 +665,7 @@ func (w Wrapper) RepoMaintenance(

 err = snapshotmaintenance.Run(ctx, dw, mode, opts.Force, kopiaSafety)
 if err != nil {
-return clues.Wrap(err, "running kopia maintenance").WithClues(ctx)
+return clues.WrapWC(ctx, err, "running kopia maintenance")
 }

 return nil
@@ -54,7 +54,7 @@ func (ctrl *Controller) ProduceBackupCollections(

 err := verifyBackupInputs(bpc.Selector, ctrl.IDNameLookup.IDs())
 if err != nil {
-return nil, nil, false, clues.Stack(err).WithClues(ctx)
+return nil, nil, false, clues.StackWC(ctx, err)
 }

 var (
@@ -118,7 +118,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 canUsePreviousBackup = true

 default:
-return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
+return nil, nil, false, clues.Wrap(clues.NewWC(ctx, service.String()), "service not supported")
 }

 for _, c := range colls {
@@ -152,7 +152,7 @@ func (ctrl *Controller) IsServiceEnabled(
 return groups.IsServiceEnabled(ctx, ctrl.AC.Groups(), resourceOwner)
 }

-return false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
+return false, clues.Wrap(clues.NewWC(ctx, service.String()), "service not supported")
 }

 func verifyBackupInputs(sels selectors.Selector, cachedIDs []string) error {
@@ -309,8 +309,7 @@ func (oc *Collection) getDriveItemContent(

 errs.AddRecoverable(
 ctx,
-clues.Wrap(err, "downloading item content").
-WithClues(ctx).
+clues.WrapWC(ctx, err, "downloading item content").
 Label(fault.LabelForceNoBackupCreation))

 // return err, not el.Err(), because the lazy reader needs to communicate to
@@ -508,8 +507,7 @@ func (lig *lazyItemGetter) GetData(
 *lig.info,
 lig.itemExtensionFactory)
 if err != nil {
-err := clues.Wrap(err, "adding extensions").
-WithClues(ctx).
+err := clues.WrapWC(ctx, err, "adding extensions").
 Label(fault.LabelForceNoBackupCreation)

 return nil, nil, false, err
@@ -637,8 +635,7 @@ func (oc *Collection) streamDriveItem(
 // permissions change does not update mod time.
 time.Now())
 if err != nil {
-errs.AddRecoverable(ctx, clues.Stack(err).
-WithClues(ctx).
+errs.AddRecoverable(ctx, clues.StackWC(ctx, err).
 Label(fault.LabelForceNoBackupCreation))

 return
@ -177,7 +177,7 @@ func DeserializeMetadata(
for breakLoop := false; !breakLoop; {
select {
case <-ctx.Done():
return nil, nil, false, clues.Wrap(ctx.Err(), "deserializing previous backup metadata").WithClues(ctx)
return nil, nil, false, clues.WrapWC(ctx, ctx.Err(), "deserializing previous backup metadata")

case item, ok := <-items:
if !ok {
@ -212,7 +212,7 @@ func DeserializeMetadata(
// these cases. We can make the logic for deciding when to continue vs.
// when to fail less strict in the future if needed.
if err != nil {
errs.Fail(clues.Stack(err).WithClues(ictx))
errs.Fail(clues.StackWC(ictx, err))

return map[string]string{}, map[string]map[string]string{}, false, nil
}
@ -408,7 +408,7 @@ func (c *Collections) Get(

p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID)
if err != nil {
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
return nil, false, clues.WrapWC(ictx, err, "making exclude prefix")
}

ssmb.Add(p.String(), excludedItemIDs)
@ -433,7 +433,7 @@ func (c *Collections) Get(

prevPath, err := path.FromDataLayerPath(p, false)
if err != nil {
err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
err = clues.WrapWC(ictx, err, "invalid previous path").With("deleted_path", p)
return nil, false, err
}

@ -449,7 +449,7 @@ func (c *Collections) Get(
true,
nil)
if err != nil {
return nil, false, clues.Wrap(err, "making collection").WithClues(ictx)
return nil, false, clues.WrapWC(ictx, err, "making collection")
}

c.CollectionMap[driveID][fldID] = col
@ -471,7 +471,7 @@ func (c *Collections) Get(
for driveID := range driveTombstones {
prevDrivePath, err := c.handler.PathPrefix(c.tenantID, driveID)
if err != nil {
return nil, false, clues.Wrap(err, "making drive tombstone for previous path").WithClues(ctx)
return nil, false, clues.WrapWC(ctx, err, "making drive tombstone for previous path")
}

coll, err := NewCollection(
@ -486,7 +486,7 @@ func (c *Collections) Get(
true,
nil)
if err != nil {
return nil, false, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
return nil, false, clues.WrapWC(ctx, err, "making drive tombstone")
}

collections = append(collections, coll)
@ -814,13 +814,14 @@ func (c *Collections) processItem(
itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName())
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
ictx = clues.Add(
ctx,
"item_id", itemID,
"item_name", clues.Hide(itemName),
"item_is_folder", isFolder)
)

ctx = clues.Add(
ctx,
"item_id", itemID,
"item_name", clues.Hide(itemName),
"item_is_folder", isFolder)

if item.GetMalware() != nil {
addtl := graph.ItemInfo(item)
skip := fault.FileSkip(fault.SkipMalware, driveID, itemID, itemName, addtl)
@ -847,19 +848,18 @@ func (c *Collections) processItem(
excludedItemIDs,
invalidPrevDelta)

return clues.Stack(err).WithClues(ictx).OrNil()
return clues.StackWC(ctx, err).OrNil()
}

collectionPath, err := c.getCollectionPath(driveID, item)
if err != nil {
return clues.Stack(err).
WithClues(ictx).
return clues.StackWC(ctx, err).
Label(fault.LabelForceNoBackupCreation)
}

// Skip items that don't match the folder selectors we were given.
if shouldSkip(ctx, collectionPath, c.handler, driveName) {
logger.Ctx(ictx).Debugw("path not selected", "skipped_path", collectionPath.String())
logger.Ctx(ctx).Debugw("path not selected", "skipped_path", collectionPath.String())
return nil
}

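One detail worth calling out in the hunk above: `clues.StackWC(ctx, err).OrNil()` keeps the nil-check-free return shape. A small sketch, assuming clues' documented `OrNil` behavior of returning nil when the stacked error is nil:

```go
package main

import (
	"context"

	"github.com/alcionai/clues" // assumed import path
)

// finishItem mirrors the return shape used above: StackWC always builds
// an error value carrying ctx clues, and OrNil collapses it back to nil
// when err was nil, so the expression can sit directly on a return line.
func finishItem(ctx context.Context, err error) error {
	// Equivalent to: if err != nil { return clues.StackWC(ctx, err) }; return nil
	return clues.StackWC(ctx, err).OrNil()
}

func main() {
	_ = finishItem(context.Background(), nil) // yields nil, not a wrapped nil
}
```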
@ -872,8 +872,7 @@ func (c *Collections) processItem(
if ok {
prevPath, err = path.FromDataLayerPath(prevPathStr, false)
if err != nil {
return clues.Wrap(err, "invalid previous path").
WithClues(ictx).
return clues.WrapWC(ctx, err, "invalid previous path").
With("prev_path_string", path.LoggableDir(prevPathStr))
}
} else if item.GetRoot() != nil {
@ -892,7 +891,7 @@ func (c *Collections) processItem(
c.CollectionMap,
collectionPath)
if err != nil {
return clues.Stack(err).WithClues(ictx)
return clues.StackWC(ctx, err)
}

if found {
@ -948,7 +947,7 @@ func (c *Collections) processItem(
invalidPrevDelta || collPathAlreadyExists,
nil)
if err != nil {
return clues.Stack(err).WithClues(ictx)
return clues.StackWC(ctx, err)
}

col.driveName = driveName
@ -970,16 +969,16 @@ func (c *Collections) processItem(
case item.GetFile() != nil:
// Deletions are handled above so this is just moves/renames.
if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
return clues.New("file without parent ID").WithClues(ictx)
return clues.NewWC(ctx, "file without parent ID")
}

// Get the collection for this item.
parentID := ptr.Val(item.GetParentReference().GetId())
ictx = clues.Add(ictx, "parent_id", parentID)
ctx = clues.Add(ctx, "parent_id", parentID)

collection, ok := c.CollectionMap[driveID][parentID]
if !ok {
return clues.New("item seen before parent folder").WithClues(ictx)
return clues.NewWC(ctx, "item seen before parent folder")
}

// This will only kick in if the file was moved multiple times
@ -989,15 +988,13 @@ func (c *Collections) processItem(
if ok {
prevColl, found := c.CollectionMap[driveID][prevParentContainerID]
if !found {
return clues.New("previous collection not found").
With("prev_parent_container_id", prevParentContainerID).
WithClues(ictx)
return clues.NewWC(ctx, "previous collection not found").
With("prev_parent_container_id", prevParentContainerID)
}

if ok := prevColl.Remove(itemID); !ok {
return clues.New("removing item from prev collection").
With("prev_parent_container_id", prevParentContainerID).
WithClues(ictx)
return clues.NewWC(ctx, "removing item from prev collection").
With("prev_parent_container_id", prevParentContainerID)
}
}

@ -1022,8 +1019,7 @@ func (c *Collections) processItem(
}

default:
return clues.New("item is neither folder nor file").
WithClues(ictx).
return clues.NewWC(ctx, "item is neither folder nor file").
Label(fault.LabelForceNoBackupCreation)
}

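The `processItem` hunks above also swap the function-local `ictx` for a plain `ctx` reassignment. The distinction, sketched below under the assumption that `processItem` handles one item per call: shadowing `ctx` is safe in single-item functions, while loops still want a per-iteration `ictx` so each pass starts from the unenriched parent context.

```go
package main

import (
	"context"

	"github.com/alcionai/clues" // assumed import path
)

// processOne handles a single item, so the enriched context can simply
// replace ctx for the rest of the function body.
func processOne(ctx context.Context, itemID string) error {
	ctx = clues.Add(ctx, "item_id", itemID)
	return clues.NewWC(ctx, "example failure")
}

// processMany scopes the enrichment per iteration, keeping each item's
// values attached only to that item's errors and logs.
func processMany(ctx context.Context, itemIDs []string) {
	for _, id := range itemIDs {
		ictx := clues.Add(ctx, "item_id", id)
		_ = clues.NewWC(ictx, "example failure")
	}
}

func main() {
	_ = processOne(context.Background(), "a")
	processMany(context.Background(), []string{"b", "c"})
}
```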
@ -124,11 +124,11 @@ func getItemName(

meta, err := FetchAndReadMetadata(ctx, fin, metaName)
if err != nil {
return "", clues.Wrap(err, "getting metadata").WithClues(ctx)
return "", clues.WrapWC(ctx, err, "getting metadata")
}

return meta.FileName, nil
}

return "", clues.New("invalid item id").WithClues(ctx)
return "", clues.NewWC(ctx, "invalid item id")
}

@ -126,7 +126,7 @@ func downloadFile(
url string,
) (io.ReadCloser, error) {
if len(url) == 0 {
return nil, clues.New("empty file url").WithClues(ctx)
return nil, clues.NewWC(ctx, "empty file url")
}

// Precheck for url expiry before we make a call to graph to download the
@ -178,7 +178,7 @@ func downloadItemMeta(

metaJSON, err := json.Marshal(meta)
if err != nil {
return nil, 0, clues.Wrap(err, "serializing item metadata").WithClues(ctx)
return nil, 0, clues.WrapWC(ctx, err, "serializing item metadata")
}

return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil
@ -231,14 +231,14 @@ func isURLExpired(
if err != nil {
logger.CtxErr(ctx, err).Info("query param not found")

return false, clues.Stack(err).WithClues(ctx)
return false, clues.StackWC(ctx, err)
}

expired, err := jwt.IsJWTExpired(rawJWT)
if err != nil {
logger.CtxErr(ctx, err).Info("checking jwt expiry")

return false, clues.Stack(err).WithClues(ctx)
return false, clues.StackWC(ctx, err)
}

return expired, nil

@ -98,7 +98,7 @@ func computePreviousLinkShares(

parent, err := originDir.Dir()
if err != nil {
return nil, clues.New("getting parent").WithClues(ctx)
return nil, clues.NewWC(ctx, "getting parent")
}

for len(parent.Elements()) > 0 {
@ -106,7 +106,7 @@ func computePreviousLinkShares(

drivePath, err := path.ToDrivePath(parent)
if err != nil {
return nil, clues.New("transforming dir to drivePath").WithClues(ictx)
return nil, clues.NewWC(ictx, "transforming dir to drivePath")
}

if len(drivePath.Folders) == 0 {
@ -115,7 +115,7 @@ func computePreviousLinkShares(

meta, ok := parentMetas.Load(parent.String())
if !ok {
return nil, clues.New("no metadata found in parent").WithClues(ictx)
return nil, clues.NewWC(ictx, "no metadata found in parent")
}

// Any change in permissions would change it to custom
@ -126,7 +126,7 @@ func computePreviousLinkShares(

parent, err = parent.Dir()
if err != nil {
return nil, clues.New("getting parent").WithClues(ctx)
return nil, clues.NewWC(ictx, "getting parent")
}
}

@ -156,14 +156,14 @@ func computePreviousMetadata(
for {
parent, err = parent.Dir()
if err != nil {
return metadata.Metadata{}, clues.New("getting parent").WithClues(ctx)
return metadata.Metadata{}, clues.NewWC(ctx, "getting parent")
}

ictx := clues.Add(ctx, "parent_dir", parent)

drivePath, err := path.ToDrivePath(parent)
if err != nil {
return metadata.Metadata{}, clues.New("transforming dir to drivePath").WithClues(ictx)
return metadata.Metadata{}, clues.NewWC(ictx, "transforming dir to drivePath")
}

if len(drivePath.Folders) == 0 {
@ -172,7 +172,7 @@ func computePreviousMetadata(

meta, ok = parentMetas.Load(parent.String())
if !ok {
return metadata.Metadata{}, clues.New("no metadata found for parent folder: " + parent.String()).WithClues(ictx)
return metadata.Metadata{}, clues.NewWC(ictx, "no metadata found for parent folder: "+parent.String())
}

if meta.SharingMode == metadata.SharingModeCustom {
@ -214,7 +214,7 @@ func UpdatePermissions(

pid, ok := oldPermIDToNewID.Load(p.ID)
if !ok {
return clues.New("no new permission id").WithClues(ctx)
return clues.NewWC(ictx, "no new permission id")
}

err := udip.DeleteItemPermission(

@ -69,7 +69,7 @@ func RestoreCollection(

drivePath, err := path.ToDrivePath(directory)
if err != nil {
return metrics, clues.Wrap(err, "creating drive path").WithClues(ctx)
return metrics, clues.WrapWC(ctx, err, "creating drive path")
}

di, err := ensureDriveExists(
@ -118,7 +118,7 @@ func RestoreCollection(
rcc.BackupVersion,
rcc.RestoreConfig.IncludePermissions)
if err != nil {
return metrics, clues.Wrap(err, "getting permissions").WithClues(ctx)
return metrics, clues.Wrap(err, "getting permissions")
}

// Create restore folders and get the folder ID of the folder the data stream will be restored in
@ -193,16 +193,16 @@ func RestoreCollection(
defer caches.pool.Put(copyBufferPtr)

copyBuffer := *copyBufferPtr
ictx := clues.Add(ctx, "restore_item_id", itemData.ID())
ctx = clues.Add(ctx, "restore_item_id", itemData.ID())

itemPath, err := dc.FullPath().AppendItem(itemData.ID())
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ictx))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "appending item to full path"))
return
}

itemInfo, skipped, err := restoreItem(
ictx,
ctx,
rh,
rcc,
dc,
@ -227,12 +227,12 @@ func RestoreCollection(
}

if skipped {
logger.Ctx(ictx).With("item_path", itemPath).Debug("did not restore item")
logger.Ctx(ctx).With("item_path", itemPath).Debug("did not restore item")
return
}

// TODO: implement locationRef
updateDeets(ictx, itemPath, &path.Builder{}, itemInfo)
updateDeets(ctx, itemPath, &path.Builder{}, itemInfo)

atomic.AddInt64(&metricsSuccess, 1)
}(ctx, itemData)
@ -312,7 +312,7 @@ func restoreItem(

meta, err := getMetadata(metaReader)
if err != nil {
return details.ItemInfo{}, true, clues.Wrap(err, "getting directory metadata").WithClues(ctx)
return details.ItemInfo{}, true, clues.WrapWC(ctx, err, "getting directory metadata")
}

trimmedPath := strings.TrimSuffix(itemPath.String(), metadata.DirMetaFileSuffix)
@ -729,7 +729,7 @@ func restoreFile(
// Get the stream size (needed to create the upload session)
ss, ok := itemData.(data.ItemSize)
if !ok {
return "", details.ItemInfo{}, clues.New("item does not implement DataStreamInfo").WithClues(ctx)
return "", details.ItemInfo{}, clues.NewWC(ctx, "item does not implement DataStreamInfo")
}

var (

@ -201,7 +201,7 @@ func (uc *urlCache) readCache(

props, ok := uc.idToProps[itemID]
if !ok {
return itemProps{}, clues.New("item not found in cache").WithClues(ctx)
return itemProps{}, clues.NewWC(ctx, "item not found in cache")
}

return props, nil

@ -99,7 +99,7 @@ func uploadAttachment(
// Max attachment size is 150MB.
content, err := api.GetAttachmentContent(attachment)
if err != nil {
return clues.Wrap(err, "serializing attachment content").WithClues(ctx)
return clues.WrapWC(ctx, err, "serializing attachment content")
}

_, err = ap.PostLargeAttachment(ctx, userID, containerID, parentItemID, name, content)

@ -46,7 +46,7 @@ func CreateCollections(

handler, ok := handlers[category]
if !ok {
return nil, clues.New("unsupported backup category type").WithClues(ctx)
return nil, clues.NewWC(ctx, "unsupported backup category type")
}

foldersComplete := observe.MessageWithCompletion(
@ -233,7 +233,7 @@ func populateCollections(
)

if collections[id] != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx))
el.AddRecoverable(ctx, clues.WrapWC(ictx, err, "conflict: tombstone exists for a live collection"))
continue
}

@ -76,14 +76,13 @@ func getItemAndInfo(
useImmutableIDs,
fault.New(true)) // temporary way to force a failFast error
if err != nil {
return nil, nil, clues.Wrap(err, "fetching item").
WithClues(ctx).
return nil, nil, clues.WrapWC(ctx, err, "fetching item").
Label(fault.LabelForceNoBackupCreation)
}

itemData, err := getter.Serialize(ctx, item, userID, id)
if err != nil {
return nil, nil, clues.Wrap(err, "serializing item").WithClues(ctx)
return nil, nil, clues.WrapWC(ctx, err, "serializing item")
}

// In case of mail the size of itemData is calc as- size of body content+size of attachment
@ -285,8 +284,7 @@ func (col *prefetchCollection) streamItems(
if err != nil {
el.AddRecoverable(
ctx,
clues.Stack(err).
WithClues(ctx).
clues.StackWC(ctx, err).
Label(fault.LabelForceNoBackupCreation))

return

@ -49,7 +49,7 @@ func (cfc *contactContainerCache) init(
baseContainerPath []string,
) error {
if len(baseNode) == 0 {
return clues.New("m365 folderID required for base contact folder").WithClues(ctx)
return clues.NewWC(ctx, "m365 folderID required for base contact folder")
}

if cfc.containerResolver == nil {
@ -77,7 +77,7 @@ func (cfc *contactContainerCache) populateContactRoot(
path.Builder{}.Append(ptr.Val(f.GetId())), // path of IDs
path.Builder{}.Append(baseContainerPath...)) // display location
if err := cfc.addFolder(&temp); err != nil {
return clues.Wrap(err, "adding resolver dir").WithClues(ctx)
return clues.WrapWC(ctx, err, "adding resolver dir")
}

return nil

@ -68,12 +68,12 @@ func (cr *containerResolver) IDToPath(

c, ok := cr.cache[folderID]
if !ok {
return nil, nil, clues.New("container not cached").WithClues(ctx)
return nil, nil, clues.NewWC(ctx, "container not cached")
}

p := c.Path()
if p == nil {
return nil, nil, clues.New("cached container has no path").WithClues(ctx)
return nil, nil, clues.NewWC(ctx, "cached container has no path")
}

return p, c.Location(), nil
@ -91,7 +91,7 @@ func (cr *containerResolver) refreshContainer(
logger.Ctx(ctx).Debug("refreshing container")

if cr.refresher == nil {
return nil, false, clues.New("nil refresher").WithClues(ctx)
return nil, false, clues.NewWC(ctx, "nil refresher")
}

c, err := cr.refresher.refreshContainer(ctx, id)
@ -100,7 +100,7 @@ func (cr *containerResolver) refreshContainer(
return nil, true, nil
} else if err != nil {
// This is some other error, just return it.
return nil, false, clues.Wrap(err, "refreshing container").WithClues(ctx)
return nil, false, clues.WrapWC(ctx, err, "refreshing container")
}

return c, false, nil
@ -131,7 +131,7 @@ func (cr *containerResolver) recoverContainer(
}

if err := cr.addFolder(c); err != nil {
return nil, nil, false, clues.Wrap(err, "adding new container").WithClues(ctx)
return nil, nil, false, clues.WrapWC(ctx, err, "adding new container")
}

// Retry populating this container's paths.
@ -162,11 +162,12 @@ func (cr *containerResolver) idToPath(

if depth >= maxIterations {
return resolvedPath{
idPath: nil,
locPath: nil,
cached: false,
deleted: false,
}, clues.New("path contains cycle or is too tall").WithClues(ctx)
idPath: nil,
locPath: nil,
cached: false,
deleted: false,
},
clues.NewWC(ctx, "path contains cycle or is too tall")
}

c, ok := cr.cache[folderID]
@ -217,7 +218,7 @@ func (cr *containerResolver) idToPath(
locPath: nil,
cached: true,
deleted: false,
}, clues.Wrap(err, "refreshing container").WithClues(ctx)
}, clues.WrapWC(ctx, err, "refreshing container")
}

if shouldDelete {
@ -249,7 +250,7 @@ func (cr *containerResolver) idToPath(
locPath: nil,
cached: false,
deleted: false,
}, clues.Wrap(err, "updating cached container").WithClues(ctx)
}, clues.WrapWC(ctx, err, "updating cached container")
}

return cr.idToPath(ctx, folderID, depth)
@ -378,7 +379,7 @@ func (cr *containerResolver) AddToCache(
Container: f,
}
if err := cr.addFolder(temp); err != nil {
return clues.Wrap(err, "adding cache folder").WithClues(ctx)
return clues.WrapWC(ctx, err, "adding cache folder")
}

// Populate the path for this entry so calls to PathInCache succeed no matter
@ -475,13 +476,12 @@ func newRankedContainerResolver(

c, err := getter.GetContainerByID(ctx, userID, id)
if err != nil {
return nil, clues.Wrap(err, "getting ranked container").WithClues(ictx)
return nil, clues.WrapWC(ictx, err, "getting ranked container")
}

gotID := ptr.Val(c.GetId())
if len(gotID) == 0 {
return nil, clues.New("ranked include container missing ID").
WithClues(ictx)
return nil, clues.NewWC(ictx, "ranked include container missing ID")
}

cr.resolvedInclude = append(cr.resolvedInclude, gotID)
@ -492,13 +492,12 @@ func newRankedContainerResolver(

c, err := getter.GetContainerByID(ctx, userID, id)
if err != nil {
return nil, clues.Wrap(err, "getting exclude container").WithClues(ictx)
return nil, clues.WrapWC(ictx, err, "getting exclude container")
}

gotID := ptr.Val(c.GetId())
if len(gotID) == 0 {
return nil, clues.New("exclude container missing ID").
WithClues(ictx)
return nil, clues.NewWC(ictx, "exclude container missing ID")
}

cr.resolvedExclude[gotID] = struct{}{}

@ -52,7 +52,7 @@ func (ecc *eventContainerCache) populateEventRoot(ctx context.Context) error {
path.Builder{}.Append(ptr.Val(f.GetId())), // storage path
path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location
if err := ecc.addFolder(&temp); err != nil {
return clues.Wrap(err, "initializing calendar resolver").WithClues(ctx)
return clues.WrapWC(ctx, err, "initializing calendar resolver")
}

return nil
@ -111,7 +111,7 @@ func (ecc *eventContainerCache) Populate(
// @returns error iff the required values are not accessible.
func (ecc *eventContainerCache) AddToCache(ctx context.Context, f graph.Container) error {
if err := checkIDAndName(f); err != nil {
return clues.Wrap(err, "validating container").WithClues(ctx)
return clues.WrapWC(ctx, err, "validating container")
}

temp := graph.NewCacheFolder(
@ -120,7 +120,7 @@ func (ecc *eventContainerCache) AddToCache(ctx context.Context, f graph.Containe
path.Builder{}.Append(ptr.Val(f.GetDisplayName()))) // display location

if err := ecc.addFolder(&temp); err != nil {
return clues.Wrap(err, "adding container").WithClues(ctx)
return clues.WrapWC(ctx, err, "adding container")
}

// Populate the path for this entry so calls to PathInCache succeed no matter

@ -107,7 +107,7 @@ func restoreEvent(
) (*details.ExchangeInfo, error) {
event, err := api.BytesToEventable(body)
if err != nil {
return nil, clues.Wrap(err, "creating event from bytes").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "creating event from bytes")
}

ctx = clues.Add(ctx, "item_id", ptr.Val(event.GetId()))
@ -176,7 +176,7 @@ func restoreEvent(
// removed cancelled and exceptions events form it
event, err = api.BytesToEventable(body)
if err != nil {
return nil, clues.Wrap(err, "creating event from bytes").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "creating event from bytes")
}

// Fix up event instances in case we have a recurring event
@ -276,8 +276,7 @@ func updateAttachments(
err = agdp.DeleteAttachment(ctx, userID, containerID, eventID, id)
if err != nil {
logger.CtxErr(ctx, err).With("attachment_name", name).Info("attachment delete failed")
el.AddRecoverable(ctx, clues.Wrap(err, "deleting event attachment").
WithClues(ctx).With("attachment_name", name))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "deleting event attachment").With("attachment_name", name))
}
}
}

@ -80,7 +80,7 @@ func (mc *mailContainerCache) populateMailRoot(ctx context.Context) error {
path.Builder{}.Append(), // path of IDs
path.Builder{}.Append()) // display location
if err := mc.addFolder(&temp); err != nil {
return clues.Wrap(err, "adding resolver dir").WithClues(ctx)
return clues.WrapWC(ctx, err, "adding resolver dir")
}

return nil

@ -107,7 +107,7 @@ func restoreMail(
) (*details.ExchangeInfo, error) {
msg, err := api.BytesToMessageable(body)
if err != nil {
return nil, clues.Wrap(err, "creating mail from bytes").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "creating mail from bytes")
}

ctx = clues.Add(ctx, "item_id", ptr.Val(msg.GetId()))

@ -48,7 +48,7 @@ func ParseMetadataCollections(
for {
select {
case <-ctx.Done():
return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)
return nil, false, clues.WrapWC(ctx, ctx.Err(), "parsing collection metadata")

case item, ok := <-items:
if !ok || errs.Failure() != nil {
@ -63,13 +63,13 @@ func ParseMetadataCollections(

err := json.NewDecoder(item.ToReader()).Decode(&m)
if err != nil {
return nil, false, clues.New("decoding metadata json").WithClues(ctx)
return nil, false, clues.NewWC(ctx, "decoding metadata json")
}

switch item.ID() {
case metadata.PreviousPathFileName:
if _, ok := found[category][metadata.PathKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of path metadata")
}

for k, p := range m {
@ -80,7 +80,7 @@ func ParseMetadataCollections(

case metadata.DeltaURLsFileName:
if _, ok := found[category][metadata.DeltaKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of delta metadata")
}

for k, d := range m {

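The metadata parsers above repeat one more shape: on cancellation, `ctx.Err()` is wrapped with the WC builder so the cancelled context's clues travel with the cancellation error. A minimal sketch of that select loop, with the channel's item type simplified:

```go
package main

import (
	"context"

	"github.com/alcionai/clues" // assumed import path
)

// drain consumes items until the channel closes or the context is
// cancelled; on cancellation the error carries the ctx clues.
func drain(ctx context.Context, items <-chan string) error {
	for {
		select {
		case <-ctx.Done():
			return clues.WrapWC(ctx, ctx.Err(), "parsing collection metadata")

		case _, ok := <-items:
			if !ok {
				return nil
			}
		}
	}
}

func main() {
	ch := make(chan string)
	close(ch)
	_ = drain(context.Background(), ch)
}
```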
@ -54,7 +54,7 @@ func RestoreCollection(
for {
select {
case <-ctx.Done():
return metrics, clues.Wrap(ctx.Err(), "context cancelled").WithClues(ctx)
return metrics, clues.WrapWC(ctx, ctx.Err(), "context cancelled")

case itemData, ok := <-items:
if !ok || el.Failure() != nil {
@ -69,7 +69,7 @@ func RestoreCollection(

_, err := buf.ReadFrom(itemData.ToReader())
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "reading item bytes").WithClues(ictx))
el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "reading item bytes"))
continue
}

@ -99,7 +99,7 @@ func RestoreCollection(
// destination folder, then the restore path no longer matches the fullPath.
itemPath, err := fullPath.AppendItem(itemData.ID())
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "adding item to collection path").WithClues(ctx))
el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "adding item to collection path"))
continue
}

@ -114,7 +114,7 @@ func RestoreCollection(
if err != nil {
// These deets additions are for cli display purposes only.
// no need to fail out on error.
logger.Ctx(ctx).Infow("accounting for restored item", "error", err)
logger.Ctx(ictx).Infow("accounting for restored item", "error", err)
}

colProgress <- struct{}{}
@ -247,7 +247,7 @@ func uploadAttachments(
continue
}

el.AddRecoverable(ctx, clues.Wrap(err, "uploading mail attachment").WithClues(ctx))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "uploading mail attachment"))
}
}

@ -219,7 +219,7 @@ func populateCollections(
)

if collections[id] != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "conflict: tombstone exists for a live collection").WithClues(ictx))
el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "conflict: tombstone exists for a live collection"))
continue
}

@ -183,9 +183,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
if err != nil {
el.AddRecoverable(
ctx,
clues.Stack(err).
WithClues(ctx).
Label(fault.LabelForceNoBackupCreation))
clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation))

return
}

@ -44,7 +44,7 @@ func parseMetadataCollections(
for {
select {
case <-ctx.Done():
return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)
return nil, false, clues.WrapWC(ctx, ctx.Err(), "parsing collection metadata")

case item, ok := <-items:
if !ok || errs.Failure() != nil {
@ -64,13 +64,13 @@ func parseMetadataCollections(

err := json.NewDecoder(item.ToReader()).Decode(&m)
if err != nil {
return nil, false, clues.New("decoding metadata json").WithClues(ctx)
return nil, false, clues.NewWC(ctx, "decoding metadata json")
}

switch item.ID() {
case metadata.PreviousPathFileName:
if _, ok := found[category][metadata.PathKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of path metadata")
}

for k, p := range m {
@ -81,7 +81,7 @@ func parseMetadataCollections(

case metadata.DeltaURLsFileName:
if _, ok := found[category][metadata.DeltaKey]; ok {
return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
return nil, false, clues.Wrap(clues.NewWC(ctx, category.String()), "multiple versions of delta metadata")
}

for k, d := range m {

@ -102,7 +102,7 @@ func CollectPages(
false,
tuple.Name)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "creating page collection path").WithClues(ctx))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "creating page collection path"))
}

collection := NewCollection(
@ -154,7 +154,7 @@ func CollectLists(
false,
tuple.Name)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "creating list collection path").WithClues(ctx))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "creating list collection path"))
}

collection := NewCollection(

@ -201,7 +201,7 @@ func (sc *Collection) retrieveLists(

byteArray, err := serializeContent(ctx, wtr, lst)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "serializing list").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "serializing list").Label(fault.LabelForceNoBackupCreation))
continue
}

@ -217,7 +217,7 @@ func (sc *Collection) retrieveLists(
ptr.Val(lst.GetId()),
details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
if err != nil {
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
el.AddRecoverable(ctx, clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation))
continue
}

@ -243,7 +243,7 @@ func (sc *Collection) retrievePages(

betaService := sc.betaService
if betaService == nil {
return metrics, clues.New("beta service required").WithClues(ctx)
return metrics, clues.NewWC(ctx, "beta service required")
}

parent, err := as.GetByID(ctx, sc.fullPath.ProtectedResource(), api.CallConfig{})
@ -269,7 +269,7 @@ func (sc *Collection) retrievePages(

byteArray, err := serializeContent(ctx, wtr, pg)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "serializing page").WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "serializing page").Label(fault.LabelForceNoBackupCreation))
continue
}

@ -284,7 +284,7 @@ func (sc *Collection) retrievePages(
ptr.Val(pg.GetId()),
details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})
if err != nil {
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
el.AddRecoverable(ctx, clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation))
continue
}

@ -151,12 +151,12 @@ func restoreListItem(

byteArray, err := io.ReadAll(itemData.ToReader())
if err != nil {
return dii, clues.Wrap(err, "reading backup data").WithClues(ctx)
return dii, clues.WrapWC(ctx, err, "reading backup data")
}

oldList, err := betaAPI.CreateListFromBytes(byteArray)
if err != nil {
return dii, clues.Wrap(err, "creating item").WithClues(ctx)
return dii, clues.WrapWC(ctx, err, "creating item")
}

if name, ok := ptr.ValOK(oldList.GetDisplayName()); ok {
@ -233,7 +233,7 @@ func RestoreListCollection(

select {
case <-ctx.Done():
return metrics, clues.Stack(ctx.Err()).WithClues(ctx)
return metrics, clues.StackWC(ctx, ctx.Err())

case itemData, ok := <-items:
if !ok {
@ -256,7 +256,7 @@ func RestoreListCollection(

itemPath, err := dc.FullPath().AppendItem(itemData.ID())
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "appending item to full path"))
continue
}

@ -312,7 +312,7 @@ func RestorePageCollection(

select {
case <-ctx.Done():
return metrics, clues.Stack(ctx.Err()).WithClues(ctx)
return metrics, clues.StackWC(ctx, ctx.Err())

case itemData, ok := <-items:
if !ok {
@ -335,7 +335,7 @@ func RestorePageCollection(

itemPath, err := dc.FullPath().AppendItem(itemData.ID())
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "appending item to full path").WithClues(ctx))
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "appending item to full path"))
continue
}

@ -75,12 +75,12 @@ func NewController(

creds, err := acct.M365Config()
if err != nil {
return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "retrieving m365 account configuration")
}

ac, err := api.NewClient(creds, co, counter)
if err != nil {
return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "creating api client")
}

ctrl := Controller{
@ -287,7 +287,7 @@ func (ctrl *Controller) PopulateProtectedResourceIDAndName(
ins idname.Cacher,
) (idname.Provider, error) {
if ctrl.resourceHandler == nil {
return nil, clues.Stack(ErrNoResourceLookup).WithClues(ctx)
return nil, clues.StackWC(ctx, ErrNoResourceLookup)
}

pr, err := ctrl.resourceHandler.GetResourceIDAndNameFrom(ctx, resourceID, ins)

@ -34,6 +34,6 @@ func (ctrl *Controller) DeserializeMetadataFiles(
case path.GroupsService, path.GroupsMetadataService:
return groups.DeserializeMetadataFiles(ctx, colls)
default:
return nil, clues.New("unrecognized service").With("service", service).WithClues(ctx)
return nil, clues.NewWC(ctx, "unrecognized service").With("service", service)
}
}

@ -29,7 +29,7 @@ func ProduceBackupCollections(
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
eb, err := bpc.Selector.ToExchangeBackup()
if err != nil {
return nil, nil, false, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
return nil, nil, false, clues.WrapWC(ctx, err, "exchange dataCollection selector")
}

var (

@ -55,13 +55,13 @@ func ConsumeRestoreCollections(

handler, ok := handlers[category]
if !ok {
el.AddRecoverable(ctx, clues.New("unsupported restore path category").WithClues(ictx))
el.AddRecoverable(ictx, clues.NewWC(ictx, "unsupported restore path category"))
continue
}

if directoryCache[category] == nil {
gcr := handler.NewContainerCache(resourceID)
if err := gcr.Populate(ctx, errs, handler.DefaultRootContainer()); err != nil {
if err := gcr.Populate(ictx, errs, handler.DefaultRootContainer()); err != nil {
return nil, clues.Wrap(err, "populating container cache")
}

@ -76,16 +76,16 @@ func ConsumeRestoreCollections(
directoryCache[category],
errs)
if err != nil {
el.AddRecoverable(ctx, err)
el.AddRecoverable(ictx, err)
continue
}

directoryCache[category] = gcc
ictx = clues.Add(ictx, "restore_destination_id", containerID)

collisionKeyToItemID, err := handler.GetItemsInContainerByCollisionKey(ctx, resourceID, containerID)
collisionKeyToItemID, err := handler.GetItemsInContainerByCollisionKey(ictx, resourceID, containerID)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "building item collision cache"))
el.AddRecoverable(ictx, clues.Wrap(err, "building item collision cache"))
continue
}

@ -108,7 +108,7 @@ func ConsumeRestoreCollections(
break
}

el.AddRecoverable(ctx, err)
el.AddRecoverable(ictx, err)
}
}

@ -60,7 +60,7 @@ func ProduceBackupCollections(
bpc.ProtectedResource.ID(),
api.CallConfig{})
if err != nil {
return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx)
return nil, nil, clues.WrapWC(ctx, err, "getting group")
}

isTeam := api.IsTeam(ctx, group)
@ -307,9 +307,7 @@ func deserializeSiteMetadata(
for breakLoop := false; !breakLoop; {
select {
case <-ctx.Done():
return nil, clues.Wrap(
ctx.Err(),
"deserializing previous sites metadata").WithClues(ctx)
return nil, clues.WrapWC(ctx, ctx.Err(), "deserializing previous sites metadata")

case item, ok := <-items:
if !ok {
@ -340,7 +338,7 @@ func deserializeSiteMetadata(
}

if err != nil {
return nil, clues.Stack(err).WithClues(ictx)
return nil, clues.StackWC(ictx, err)
}
}
}

@ -17,7 +17,7 @@ func IsServiceEnabled(
) (bool, error) {
resp, err := gbi.GetByID(ctx, resource, api.CallConfig{})
if err != nil {
return false, clues.Wrap(err, "getting group").WithClues(ctx)
return false, clues.WrapWC(ctx, err, "getting group")
}

// according to graph api docs: https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0

@ -84,7 +84,7 @@ func (h *baseGroupsHandler) ProduceExportCollections(
case path.LibrariesCategory:
drivePath, err := path.ToDrivePath(restoreColl.FullPath())
if err != nil {
return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "transforming path to drive path")
}

driveName, ok := h.backupDriveIDNames.NameOf(drivePath.DriveID)

@ -73,12 +73,12 @@ func ConsumeRestoreCollections(
webURL, ok := backupSiteIDWebURL.NameOf(siteID)
if !ok {
// This should not happen, but just in case
logger.Ctx(ctx).With("site_id", siteID).Info("site weburl not found, using site id")
logger.Ctx(ictx).With("site_id", siteID).Info("site weburl not found, using site id")
}

siteName, err = getSiteName(ctx, siteID, webURL, ac.Sites(), webURLToSiteNames)
siteName, err = getSiteName(ictx, siteID, webURL, ac.Sites(), webURLToSiteNames)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "getting site").
el.AddRecoverable(ictx, clues.Wrap(err, "getting site").
With("web_url", webURL, "site_id", siteID))
} else if len(siteName) == 0 {
// Site was deleted in between and restore and is not
@ -95,7 +95,7 @@ func ConsumeRestoreCollections(
Selector: rcc.Selector,
}

err = caches.Populate(ctx, lrh, srcc.ProtectedResource.ID())
err = caches.Populate(ictx, lrh, srcc.ProtectedResource.ID())
if err != nil {
return nil, clues.Wrap(err, "initializing restore caches")
}
@ -112,17 +112,16 @@ func ConsumeRestoreCollections(
ctr)
case path.ChannelMessagesCategory:
// Message cannot be restored as of now using Graph API.
logger.Ctx(ctx).Debug("Skipping restore for channel messages")
logger.Ctx(ictx).Debug("Skipping restore for channel messages")
default:
return nil, clues.New("data category not supported").
With("category", category).
WithClues(ictx)
return nil, clues.NewWC(ictx, "data category not supported").
With("category", category)
}

restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)

if err != nil {
el.AddRecoverable(ctx, err)
el.AddRecoverable(ictx, err)
}

if errors.Is(err, context.Canceled) {

@ -28,7 +28,7 @@ func ProduceBackupCollections(
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
odb, err := bpc.Selector.ToOneDriveBackup()
if err != nil {
return nil, nil, false, clues.Wrap(err, "parsing selector").WithClues(ctx)
return nil, nil, false, clues.WrapWC(ctx, err, "parsing selector")
}

var (

@ -49,7 +49,7 @@ func (h *baseOnedriveHandler) ProduceExportCollections(
for _, dc := range dcs {
drivePath, err := path.ToDrivePath(dc.FullPath())
if err != nil {
return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "transforming path to drive path")
}

baseDir := path.Builder{}.Append(drivePath.Folders...)

@ -185,13 +185,13 @@ func RestoreSitePage(

byteArray, err := io.ReadAll(itemData.ToReader())
if err != nil {
return dii, clues.Wrap(err, "reading sharepoint data").WithClues(ctx)
return dii, clues.WrapWC(ctx, err, "reading sharepoint data")
}

// Hydrate Page
page, err := CreatePageFromBytes(byteArray)
if err != nil {
return dii, clues.Wrap(err, "creating Page object").WithClues(ctx)
return dii, clues.WrapWC(ctx, err, "creating Page object")
}

name, ok := ptr.ValOK(page.GetName())
@ -217,7 +217,7 @@ func RestoreSitePage(
// Publish page to make visible
// See https://learn.microsoft.com/en-us/graph/api/sitepage-publish?view=graph-rest-beta
if restoredPage.GetWebUrl() == nil {
return dii, clues.New("webURL not populated during page creation").WithClues(ctx)
return dii, clues.NewWC(ctx, "webURL not populated during page creation")
}

err = service.Client().

@ -62,7 +62,7 @@ func (h *baseSharepointHandler) ProduceExportCollections(
for _, dc := range dcs {
drivePath, err := path.ToDrivePath(dc.FullPath())
if err != nil {
return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "transforming path to drive path")
}

driveName, ok := h.backupDriveIDNames.NameOf(drivePath.DriveID)

@ -591,7 +591,7 @@ func consumeBackupCollections(
"kopia_expected_ignored_errors", kopiaStats.ExpectedIgnoredErrorCount)

if kopiaStats.ErrorCount > 0 {
err = clues.New("building kopia snapshot").WithClues(ctx)
err = clues.NewWC(ctx, "building kopia snapshot")
} else if kopiaStats.IgnoredErrorCount > kopiaStats.ExpectedIgnoredErrorCount {
logger.Ctx(ctx).Info("recoverable errors were seen during backup")
}
@ -671,7 +671,7 @@ func mergeItemsFromBase(
errs)
if err != nil {
return manifestAddedEntries,
clues.New("fetching base details for backup").WithClues(ctx)
clues.NewWC(ctx, "fetching base details for backup")
}

for _, entry := range baseDeets.Items() {
@ -681,8 +681,7 @@ func mergeItemsFromBase(

rr, err := path.FromDataLayerPath(entry.RepoRef, true)
if err != nil {
return manifestAddedEntries, clues.New("parsing base item info path").
WithClues(ctx).
return manifestAddedEntries, clues.NewWC(ctx, "parsing base item info path").
With("repo_ref", path.LoggableDir(entry.RepoRef))
}

@ -713,7 +712,7 @@ func mergeItemsFromBase(
baseBackup.Backup.Version)
if err != nil {
return manifestAddedEntries,
clues.Wrap(err, "getting updated info for entry").WithClues(ictx)
clues.WrapWC(ictx, err, "getting updated info for entry")
}

// This entry isn't merged.
@ -731,7 +730,7 @@ func mergeItemsFromBase(
item)
if err != nil {
return manifestAddedEntries,
clues.Wrap(err, "adding item to details").WithClues(ictx)
clues.WrapWC(ictx, err, "adding item to details")
}

// Make sure we won't add this again in another base.
@ -836,8 +835,7 @@ func mergeDetails(
checkCount := dataFromBackup.ItemsToMerge()

if addedEntries != checkCount {
return clues.New("incomplete migration of backup details").
WithClues(ctx).
return clues.NewWC(ctx, "incomplete migration of backup details").
With(
"item_count", addedEntries,
"expected_item_count", checkCount)
@ -918,32 +916,32 @@ func (op *BackupOperation) createBackupModels(
// during the operation, regardless of the failure policy. Unlikely we'd
// hit this here as the preceding code should already take care of it.
if op.Errors.Failure() != nil {
return clues.Wrap(op.Errors.Failure(), "non-recoverable failure").WithClues(ctx)
return clues.WrapWC(ctx, op.Errors.Failure(), "non-recoverable failure")
}

if deets == nil {
return clues.New("no backup details to record").WithClues(ctx)
return clues.NewWC(ctx, "no backup details to record")
}

ctx = clues.Add(ctx, "details_entry_count", len(deets.Entries))

if len(snapID) == 0 {
return clues.New("no snapshot ID to record").WithClues(ctx)
return clues.NewWC(ctx, "no snapshot ID to record")
}

err := sscw.Collect(ctx, streamstore.DetailsCollector(deets))
if err != nil {
return clues.Wrap(err, "collecting details for persistence").WithClues(ctx)
return clues.Wrap(err, "collecting details for persistence")
}

err = sscw.Collect(ctx, streamstore.FaultErrorsCollector(op.Errors.Errors()))
if err != nil {
return clues.Wrap(err, "collecting errors for persistence").WithClues(ctx)
return clues.Wrap(err, "collecting errors for persistence")
}

ssid, err := sscw.Write(ctx, errs)
if err != nil {
return clues.Wrap(err, "persisting details and errors").WithClues(ctx)
return clues.Wrap(err, "persisting details and errors")
}

ctx = clues.Add(ctx, "streamstore_snapshot_id", ssid)
@ -967,7 +965,7 @@ func (op *BackupOperation) createBackupModels(
ssid,
op.Options.FailureHandling,
op.Errors) {
return clues.New("failed preview backup").WithClues(ctx)
return clues.NewWC(ctx, "failed preview backup")
}

tags[model.BackupTypeTag] = model.PreviewBackup
@ -988,13 +986,12 @@ func (op *BackupOperation) createBackupModels(
tags[model.BackupTypeTag] = model.AssistBackup

default:
return clues.New("unable to determine backup type due to operation errors").
WithClues(ctx)
return clues.NewWC(ctx, "unable to determine backup type due to operation errors")
}

// Additional defensive check to make sure we tag things as expected above.
if len(tags[model.BackupTypeTag]) == 0 {
return clues.New("empty backup type tag").WithClues(ctx)
return clues.NewWC(ctx, "empty backup type tag")
}

ctx = clues.Add(ctx, model.BackupTypeTag, tags[model.BackupTypeTag])
@ -1015,7 +1012,7 @@ func (op *BackupOperation) createBackupModels(
logger.Ctx(ctx).Info("creating new backup")

if err = op.store.Put(ctx, model.BackupSchema, b); err != nil {
return clues.Wrap(err, "creating backup model").WithClues(ctx)
return clues.Wrap(err, "creating backup model")
}

return nil

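Note that several call sites in `createBackupModels` above drop `.WithClues(ctx)` without converting to a WC builder at all. That only preserves the attached clues if the callee already packs ctx into the error it returns, which is what the plain deletion implies. A sketch of that caller/callee split; the `collector` interface and `nopCollector` type are invented here purely to make the example compile:

```go
package main

import (
	"context"

	"github.com/alcionai/clues" // assumed import path
)

// collector stands in for the streamstore writer; hypothetical, not the
// real interface.
type collector interface {
	Collect(ctx context.Context, c any) error
}

// nopCollector demonstrates the callee side: it attaches ctx clues to
// its own error, so callers need not repeat the attachment.
type nopCollector struct{}

func (nopCollector) Collect(ctx context.Context, c any) error {
	return clues.NewWC(ctx, "collect failed")
}

// collectDetails shows the caller side: since Collect is assumed to
// return errors that already carry ctx clues, wrapping with plain
// clues.Wrap avoids attaching the same values twice.
func collectDetails(ctx context.Context, sscw collector) error {
	if err := sscw.Collect(ctx, nil); err != nil {
		return clues.Wrap(err, "collecting details for persistence")
	}

	return nil
}

func main() {
	_ = collectDetails(context.Background(), nopCollector{})
}
```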
@ -50,7 +50,7 @@ func getDetailsFromBackup(
}

if len(ssid) == 0 {
return nil, clues.New("no details or errors in backup").WithClues(ctx)
return nil, clues.NewWC(ctx, "no details or errors in backup")
}

if err := detailsStore.Read(ctx, ssid, umt, errs); err != nil {

@ -115,8 +115,7 @@ func makeRestorePathsForEntry(

repoRef, err := path.FromDataLayerPath(ent.RepoRef, true)
if err != nil {
err = clues.Wrap(err, "parsing RepoRef").
WithClues(ctx).
err = clues.WrapWC(ctx, err, "parsing RepoRef").
With("repo_ref", clues.Hide(ent.RepoRef), "location_ref", clues.Hide(ent.LocationRef))

return res, err
@ -128,8 +127,7 @@ func makeRestorePathsForEntry(
// Get the LocationRef so we can munge it onto our path.
locRef, err := locationRef(ent, repoRef, backupVersion)
if err != nil {
err = clues.Wrap(err, "parsing LocationRef after reduction").
WithClues(ctx).
err = clues.WrapWC(ctx, err, "parsing LocationRef after reduction").
With("location_ref", clues.Hide(ent.LocationRef))

return res, err
@ -154,11 +152,11 @@ func makeRestorePathsForEntry(
(ent.Groups != nil && ent.Groups.ItemType == details.SharePointLibrary):
res.RestorePath, err = drivePathMerge(ent, repoRef, locRef)
default:
return res, clues.New("unknown entry type").WithClues(ctx)
return res, clues.NewWC(ctx, "unknown entry type")
}

if err != nil {
return res, clues.Wrap(err, "generating RestorePath").WithClues(ctx)
return res, clues.WrapWC(ctx, err, "generating RestorePath")
}

return res, nil

@ -243,13 +243,11 @@ func (op *RestoreOperation) do(
op.Selectors.PathService(),
restoreToProtectedResource.ID())
if err != nil {
return nil, clues.Wrap(err, "verifying service restore is enabled").WithClues(ctx)
return nil, clues.Wrap(err, "verifying service restore is enabled")
}

if !enabled {
return nil, clues.Wrap(
graph.ErrServiceNotEnabled,
"service not enabled for restore").WithClues(ctx)
return nil, clues.WrapWC(ctx, graph.ErrServiceNotEnabled, "service not enabled for restore")
}

observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(restoreToProtectedResource.Name()))

@ -42,11 +42,11 @@ func (ms Streamer) Read(
case streamstore.FaultErrorsType:
mr = ms.Errors[snapshotID]
default:
return clues.New("unknown type: " + col.Type).WithClues(ctx)
return clues.NewWC(ctx, "unknown type: "+col.Type)
}

if mr == nil {
return clues.New("collectable " + col.Type + " has no marshaller").WithClues(ctx)
return clues.NewWC(ctx, "collectable "+col.Type+" has no marshaller")
}

bs, err := mr.Marshal()

@ -173,14 +173,14 @@ func collect(
// construct the path of the container
p, err := path.Builder{}.ToStreamStorePath(tenantID, col.purpose, service, false)
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
return nil, clues.StackWC(ctx, err)
}

// TODO: We could use an io.Pipe here to avoid a double copy but that
// makes error handling a bit complicated
bs, err := col.mr.Marshal()
if err != nil {
return nil, clues.Wrap(err, "marshalling body").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "marshalling body")
}

item, err := data.NewPrefetchedItem(
@ -188,7 +188,7 @@ func collect(
col.itemName,
time.Now())
if err != nil {
return nil, clues.Stack(err).WithClues(ctx)
return nil, clues.StackWC(ctx, err)
}

dc := streamCollection{
@ -240,12 +240,12 @@ func read(
Append(col.itemName).
ToStreamStorePath(tenantID, col.purpose, service, true)
if err != nil {
return clues.Stack(err).WithClues(ctx)
return clues.StackWC(ctx, err)
}

pd, err := p.Dir()
if err != nil {
return clues.Stack(err).WithClues(ctx)
return clues.StackWC(ctx, err)
}

ctx = clues.Add(ctx, "snapshot_id", snapshotID)
@ -267,8 +267,7 @@ func read(

// Expect only 1 data collection
if len(cs) != 1 {
return clues.New("unexpected collection count").
WithClues(ctx).
return clues.NewWC(ctx, "unexpected collection count").
With("collection_count", len(cs))
}

@ -281,19 +280,19 @@ func read(
for {
select {
case <-ctx.Done():
return clues.New("context cancelled waiting for data").WithClues(ctx)
return clues.NewWC(ctx, "context cancelled waiting for data")

case itemData, ok := <-items:
if !ok {
if !found {
return clues.New("no data found").WithClues(ctx)
return clues.NewWC(ctx, "no data found")
}

return nil
}

if err := col.Unmr(itemData.ToReader()); err != nil {
return clues.Wrap(err, "unmarshalling data").WithClues(ctx)
return clues.WrapWC(ctx, err, "unmarshalling data")
}

found = true

@ -28,15 +28,15 @@ func ConsumeExportCollections(
folder := filepath.Join(exportLocation, col.BasePath())
ictx := clues.Add(ctx, "dir_name", folder)

for item := range col.Items(ctx) {
for item := range col.Items(ictx) {
if item.Error != nil {
el.AddRecoverable(ictx, clues.Wrap(item.Error, "getting item").WithClues(ctx))
el.AddRecoverable(ictx, clues.Wrap(item.Error, "getting item"))
}

if err := writeItem(ictx, item, folder); err != nil {
el.AddRecoverable(
ictx,
clues.Wrap(err, "writing item").With("file_name", item.Name).WithClues(ctx))
clues.Wrap(err, "writing item").With("file_name", item.Name))
}
}
}
@ -60,19 +60,19 @@ func writeItem(ctx context.Context, item Item, folder string) error {

err := os.MkdirAll(folder, os.ModePerm)
if err != nil {
return clues.Wrap(err, "creating directory")
return clues.WrapWC(ctx, err, "creating directory")
}

// In case the user tries to restore to a non-clean
// directory, we might run into collisions an fail.
f, err := os.Create(fpath)
if err != nil {
return clues.Wrap(err, "creating file")
return clues.WrapWC(ctx, err, "creating file")
}

_, err = io.Copy(f, progReader)
if err != nil {
return clues.Wrap(err, "writing data")
return clues.WrapWC(ctx, err, "writing data")
}

return nil

@ -137,7 +137,7 @@ func Example_logger_clues_standards() {
// 2. The last func to handle a context must add the clues to the error.
//
// preferred
err := clues.Wrap(err, "reason").WithClues(ctx)
err := clues.WrapWC(ctx, err, "reason")
// this dereference added for linter happiness
_ = err

@ -210,7 +210,7 @@ func getBackupDetails(
}

if len(ssid) == 0 {
return nil, b, clues.New("no streamstore id in backup").WithClues(ctx)
return nil, b, clues.NewWC(ctx, "no streamstore id in backup")
}

var (

@ -276,7 +276,7 @@ func getBackupErrors(
ssid := b.StreamStoreID
if len(ssid) == 0 {
return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx)
return nil, b, clues.NewWC(ctx, "missing streamstore id in backup")
}

var (

@ -335,9 +335,7 @@ func deleteBackups(
continue
}

return clues.Stack(errWrapper(err)).
WithClues(ctx).
With("delete_backup_id", id)
return clues.StackWC(ctx, errWrapper(err)).With("delete_backup_id", id)
}

toDelete = append(toDelete, b.ModelStoreID)

@ -57,7 +57,7 @@ func (r *repository) ConnectDataProvider(
case account.ProviderM365:
provider, err = connectToM365(ctx, *r, pst)
default:
err = clues.New("unrecognized provider").WithClues(ctx)
err = clues.NewWC(ctx, "unrecognized provider")
}

if err != nil {

@ -101,7 +101,7 @@ func New(
bus, err := events.NewBus(ctx, st, acct.ID(), opts)
if err != nil {
return nil, clues.Wrap(err, "constructing event bus").WithClues(ctx)
return nil, clues.WrapWC(ctx, err, "constructing event bus")
}

repoID := configFileRepoID

@ -310,7 +310,7 @@ func (r *repository) setupKopia(
if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts, repoHashName); err != nil {
// Replace common internal errors so that SDK users can check results with errors.Is()
if errors.Is(err, kopia.ErrorRepoAlreadyExists) {
return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
return clues.Stack(ErrorRepoAlreadyExists, err)
}

return clues.Wrap(err, "initializing kopia")

@ -326,12 +326,12 @@ func (r *repository) setupKopia(
r.dataLayer, err = kopia.NewWrapper(kopiaRef)
if err != nil {
return clues.Stack(err).WithClues(ctx)
return clues.StackWC(ctx, err)
}

r.modelStore, err = kopia.NewModelStore(kopiaRef)
if err != nil {
return clues.Stack(err).WithClues(ctx)
return clues.StackWC(ctx, err)
}

if r.ID == events.RepoIDNotFound {

@ -540,7 +540,7 @@ func reduce[T scopeT, C categoryT](
repoPath, err := path.FromDataLayerPath(ent.RepoRef, true)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "transforming repoRef to path").WithClues(ictx))
el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "transforming repoRef to path"))
continue
}

@ -563,7 +563,7 @@ func reduce[T scopeT, C categoryT](
pv, err := dc.pathValues(repoPath, *ent, s.Cfg)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "getting path values").WithClues(ictx))
el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "getting path values"))
continue
}
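The two `reduce` hunks are the commit's "corrected some usages of ictx within loops" bullet: the old lines tagged the error with the loop-scoped `ictx` but handed the outer `ctx` to the fault bus, so the two records disagreed about which clues applied. A sketch of the corrected shape, under the assumption that the bus exposes the `AddRecoverable(ctx, err)` surface seen in the diff; the `adder` interface, `processEntries`, and `handle` are hypothetical stand-ins, not repo code:

```go
package example

import (
	"context"
	"fmt"

	"github.com/alcionai/clues"
)

// adder mimics the fault-bus surface used in the diff; the real type
// lives in the repo's fault package.
type adder interface {
	AddRecoverable(ctx context.Context, err error)
}

func processEntries(ctx context.Context, ents []string, el adder) {
	for _, ent := range ents {
		// derive a per-iteration context; values added here
		// must not leak into the next entry.
		ictx := clues.Add(ctx, "entry", ent)

		if err := handle(ictx, ent); err != nil {
			// the fix in these hunks: hand the same ictx to both the
			// fault bus and the WC builder, not the outer ctx.
			el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "handling entry"))
		}
	}
}

func handle(ctx context.Context, ent string) error {
	return fmt.Errorf("placeholder failure for %s", ent)
}
```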
@ -77,12 +77,12 @@ func (c Channels) GetChannelByName(
Channels().
Get(ctx, options)
if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx)
return nil, graph.Stack(ctx, err)
}

gv := resp.GetValue()
if len(gv) == 0 {
return nil, clues.New("channel not found").WithClues(ctx)
return nil, clues.NewWC(ctx, "channel not found")
}

// We only allow the api to match one channel with the provided name.

@ -93,7 +93,7 @@ func (c Channels) GetChannelByName(
cal := gv[0]

if err := checkIDAndName(cal); err != nil {
return nil, clues.Stack(err).WithClues(ctx)
return nil, clues.StackWC(ctx, err)
}

return cal, nil

@ -128,29 +128,28 @@ func (c Contacts) GetContainerByName(
ContactFolders().
Get(ctx, options)
if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx)
return nil, graph.Stack(ctx, err)
}

gv := resp.GetValue()

if len(gv) == 0 {
return nil, clues.New("container not found").WithClues(ctx)
return nil, clues.NewWC(ctx, "container not found")
}

// We only allow the api to match one container with the provided name.
// Return an error if multiple containers exist (unlikely) or if no container
// is found.
if len(gv) != 1 {
return nil, clues.Stack(graph.ErrMultipleResultsMatchIdentifier).
With("returned_container_count", len(gv)).
WithClues(ctx)
return nil, clues.StackWC(ctx, graph.ErrMultipleResultsMatchIdentifier).
With("returned_container_count", len(gv))
}

// Sanity check ID and name
container := gv[0]

if err := graph.CheckIDAndName(container); err != nil {
return nil, clues.Stack(err).WithClues(ctx)
return nil, clues.StackWC(ctx, err)
}

return container, nil

@ -137,13 +137,13 @@ func (c Events) GetContainerByName(
Calendars().
Get(ctx, options)
if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx)
return nil, graph.Stack(ctx, err)
}

gv := resp.GetValue()

if len(gv) == 0 {
return nil, clues.New("container not found").WithClues(ctx)
return nil, clues.NewWC(ctx, "container not found")
}

// We only allow the api to match one calendar with the provided name.

@ -155,7 +155,7 @@ func (c Events) GetContainerByName(
container := graph.CalendarDisplayable{Calendarable: cal}

if err := graph.CheckIDAndName(container); err != nil {
return nil, clues.Stack(err).WithClues(ctx)
return nil, clues.StackWC(ctx, err)
}

return container, nil

@ -546,7 +546,7 @@ func (c Events) PostLargeAttachment(
_, err = io.CopyBuffer(w, bytes.NewReader(content), copyBuffer)
if err != nil {
return "", clues.Wrap(err, "buffering large attachment content").WithClues(ctx)
return "", clues.WrapWC(ctx, err, "buffering large attachment content")
}

return w.ID, nil

@ -82,8 +82,8 @@ func BaseCollections(
full, err := path.BuildPrefix(tenant, rOwner, service, cat)
if err != nil {
// Shouldn't happen.
err = clues.Wrap(err, "making path").WithClues(ictx)
el.AddRecoverable(ctx, err)
err = clues.WrapWC(ictx, err, "making path")
el.AddRecoverable(ictx, err)
lastErr = err

continue

@ -373,7 +373,7 @@ func Wrap(ctx context.Context, e error, msg string) *clues.Err {
var oDataError odataerrors.ODataErrorable
if !errors.As(e, &oDataError) {
return clues.Wrap(e, msg).WithClues(ctx).WithTrace(1)
return clues.WrapWC(ctx, e, msg).WithTrace(1)
}

mainMsg, data, innerMsg := errData(oDataError)

@ -382,7 +382,7 @@ func Wrap(ctx context.Context, e error, msg string) *clues.Err {
e = clues.Stack(e, clues.New(mainMsg))
}

ce := clues.Wrap(e, msg).WithClues(ctx).With(data...).WithTrace(1)
ce := clues.WrapWC(ctx, e, msg).With(data...).WithTrace(1)

return setLabels(ce, innerMsg)
}

@ -396,7 +396,7 @@ func Stack(ctx context.Context, e error) *clues.Err {
var oDataError *odataerrors.ODataError
if !errors.As(e, &oDataError) {
return clues.Stack(e).WithClues(ctx).WithTrace(1)
return clues.StackWC(ctx, e).WithTrace(1)
}

mainMsg, data, innerMsg := errData(oDataError)

@ -405,7 +405,7 @@ func Stack(ctx context.Context, e error) *clues.Err {
e = clues.Stack(e, clues.New(mainMsg))
}

ce := clues.Stack(e).WithClues(ctx).With(data...).WithTrace(1)
ce := clues.StackWC(ctx, e).With(data...).WithTrace(1)

return setLabels(ce, innerMsg)
}
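The `graph.Wrap` and `graph.Stack` hunks also explain the commit's other cleanup bullet: these helpers take the ctx and now attach it themselves via the WC builders, so call sites like `graph.Stack(ctx, err).WithClues(ctx)` above were packing the same ctx values twice, and the trailing builder is simply dropped. A miniature sketch of that producer/caller split; `stack` and `caller` are illustrative names, not repo code:

```go
package example

import (
	"context"
	"errors"

	"github.com/alcionai/clues"
)

// stack mirrors the graph.Stack pattern above in miniature: the helper
// itself packs ctx into the error, so callers must not re-attach it.
func stack(ctx context.Context, e error) *clues.Err {
	return clues.StackWC(ctx, e).WithTrace(1)
}

func caller(ctx context.Context) error {
	err := errors.New("graph call failed")

	// old: return stack(ctx, err).WithClues(ctx) // ctx values added twice
	return stack(ctx, err) // the helper already called the WC builder
}
```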
@ -227,7 +227,7 @@ func (mw RetryMiddleware) retryRequest(
case <-ctx.Done():
// Don't retry if the context is marked as done, it will just error out
// when we attempt to send the retry anyway.
return resp, clues.Stack(ctx.Err()).WithClues(ctx)
return resp, clues.StackWC(ctx, ctx.Err())

case <-timer.C:
}

@ -365,17 +365,17 @@ func (aw *adapterWrap) Send(
for i := 0; i < aw.config.maxConnectionRetries+1; i++ {
ictx := clues.Add(ctx, "request_retry_iter", i)

sp, err = aw.RequestAdapter.Send(ctx, requestInfo, constructor, errorMappings)
sp, err = aw.RequestAdapter.Send(ictx, requestInfo, constructor, errorMappings)
if err == nil {
break
}

if IsErrApplicationThrottled(err) {
return nil, clues.Stack(ErrApplicationThrottled, err).WithTrace(1).WithClues(ictx)
return nil, clues.StackWC(ictx, ErrApplicationThrottled, err).WithTrace(1)
}

if !IsErrConnectionReset(err) && !connectionEnded.Compare(err.Error()) {
return nil, clues.Stack(err).WithTrace(1).WithClues(ictx)
return nil, clues.StackWC(ictx, err).WithTrace(1)
}

logger.Ctx(ictx).Debug("http connection error")
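The `adapterWrap.Send` hunk is the second loop-context correction: the retry loop built an `ictx` tagged with the attempt number but then issued the request with the outer `ctx`, so the `request_retry_iter` value never reached downstream logs or errors. A sketch of the corrected shape; `send`, `attempt`, and `isRetryable` are hypothetical stand-ins for the adapter machinery:

```go
package example

import (
	"context"
	"errors"

	"github.com/alcionai/clues"
)

// send sketches the retry-loop fix, not the adapter itself: every call
// inside the loop must take ictx so the "request_retry_iter" value
// travels with logs and errors for that attempt.
func send(ctx context.Context, maxRetries int) error {
	var err error

	for i := 0; i < maxRetries+1; i++ {
		ictx := clues.Add(ctx, "request_retry_iter", i)

		err = attempt(ictx) // was attempt(ctx): the iter tag was lost
		if err == nil {
			return nil
		}

		if !isRetryable(err) {
			return clues.StackWC(ictx, err).WithTrace(1)
		}
	}

	return clues.StackWC(ctx, err)
}

func attempt(ctx context.Context) error { return errors.New("connection reset") }
func isRetryable(err error) bool        { return true }
```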
@ -185,9 +185,9 @@ func getGroupFromResponse(ctx context.Context, resp models.GroupCollectionRespon
vs := resp.GetValue()

if len(vs) == 0 {
return nil, clues.Stack(graph.ErrResourceOwnerNotFound).WithClues(ctx)
return nil, clues.StackWC(ctx, graph.ErrResourceOwnerNotFound)
} else if len(vs) > 1 {
return nil, clues.Stack(graph.ErrMultipleResultsMatchIdentifier).WithClues(ctx)
return nil, clues.StackWC(ctx, graph.ErrMultipleResultsMatchIdentifier)
}

return vs[0], nil

@ -216,7 +216,7 @@ func (c Groups) GetAllSites(
identifier,
CallConfig{})
if err != nil {
return nil, clues.Wrap(err, "getting group").WithClues(ctx)
return nil, clues.Wrap(err, "getting group")
}

isTeam := IsTeam(ctx, group)

@ -256,8 +256,7 @@ func (c Groups) GetAllSites(
FilesFolder().
Get(ictx, nil)
if err != nil {
return nil, clues.Wrap(err, "getting files folder for channel").
WithClues(ictx)
return nil, clues.WrapWC(ictx, err, "getting files folder for channel")
}

// WebURL returned here is the url to the documents folder, we

@ -267,8 +266,7 @@ func (c Groups) GetAllSites(
u, err := url.Parse(documentWebURL)
if err != nil {
return nil, clues.Wrap(err, "parsing document web url").
WithClues(ictx)
return nil, clues.WrapWC(ictx, err, "parsing document web url")
}

pathSegments := strings.Split(u.Path, "/") // pathSegments[0] == ""

@ -278,7 +276,7 @@ func (c Groups) GetAllSites(
site, err := Sites(c).GetByID(ictx, siteWebURL, CallConfig{})
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "getting site"))
el.AddRecoverable(ictx, clues.Wrap(err, "getting site"))
continue
}

@ -47,7 +47,7 @@ func (c Lists) PostDrive(
newList, err := builder.Post(ctx, list, nil)
if graph.IsErrItemAlreadyExistsConflict(err) {
return nil, clues.Stack(graph.ErrItemAlreadyExistsConflict, err).WithClues(ctx)
return nil, clues.StackWC(ctx, graph.ErrItemAlreadyExistsConflict, err)
}

if err != nil {

@ -156,29 +156,28 @@ func (c Mail) GetContainerByName(
}

if err != nil {
return nil, graph.Stack(ctx, err).WithClues(ctx)
return nil, graph.Stack(ctx, err)
}

gv := resp.GetValue()

if len(gv) == 0 {
return nil, clues.New("container not found").WithClues(ctx)
return nil, clues.NewWC(ctx, "container not found")
}

// We only allow the api to match one container with the provided name.
// Return an error if multiple containers exist (unlikely) or if no container
// is found.
if len(gv) != 1 {
return nil, clues.Stack(graph.ErrMultipleResultsMatchIdentifier).
With("returned_container_count", len(gv)).
WithClues(ctx)
return nil, clues.StackWC(ctx, graph.ErrMultipleResultsMatchIdentifier).
With("returned_container_count", len(gv))
}

// Sanity check ID and name
container := gv[0]

if err := graph.CheckIDAndName(container); err != nil {
return nil, clues.Stack(err).WithClues(ctx)
return nil, clues.StackWC(ctx, err)
}

return container, nil

@ -406,7 +405,7 @@ func (c Mail) PostItem(
}

if itm == nil {
return nil, clues.New("nil response mail message creation").WithClues(ctx)
return nil, clues.NewWC(ctx, "nil response mail message creation")
}

return itm, nil

@ -513,7 +512,7 @@ func (c Mail) PostLargeAttachment(
_, err = io.CopyBuffer(w, bytes.NewReader(content), copyBuffer)
if err != nil {
return "", clues.Wrap(err, "buffering large attachment content").WithClues(ctx)
return "", clues.WrapWC(ctx, err, "buffering large attachment content")
}

return w.ID, nil

@ -453,8 +453,7 @@ func batchWithMaxItemCount[T any](
// cancel the pager because it should see the context cancellation once we
// stop attempting to fetch the next page.
if ctx.Err() != nil {
return nil, nil, DeltaUpdate{}, clues.Stack(ctx.Err(), context.Cause(ctx)).
WithClues(ctx)
return nil, nil, DeltaUpdate{}, clues.StackWC(ctx, ctx.Err(), context.Cause(ctx))
}

// Get the next page first thing in the loop instead of last thing so we
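The `batchWithMaxItemCount` hunk flattens a two-line builder while keeping the `context.Cause` stacking: on cancellation, `ctx.Err()` reports only the generic `context.Canceled`, while `context.Cause(ctx)` surfaces whatever reason was passed to the cancel function, so stacking both preserves the richer error. A minimal standalone sketch, assuming Go 1.20+ for `context.WithCancelCause`:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/alcionai/clues"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.New("pager shut down by caller"))

	if ctx.Err() != nil {
		// ctx.Err() is just context.Canceled; context.Cause(ctx)
		// carries the reason handed to cancel().
		err := clues.StackWC(ctx, ctx.Err(), context.Cause(ctx))
		fmt.Println(err)
	}
}
```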
@ -34,7 +34,7 @@ func makeAC(
creds, err := acct.M365Config()
if err != nil {
return api.Client{}, clues.Wrap(err, "getting m365 account creds").WithClues(ctx)
return api.Client{}, clues.WrapWC(ctx, err, "getting m365 account creds")
}

cli, err := api.NewClient(

@ -42,7 +42,7 @@ func makeAC(
control.DefaultOptions(),
count.New())
if err != nil {
return api.Client{}, clues.Wrap(err, "constructing api client").WithClues(ctx)
return api.Client{}, clues.WrapWC(ctx, err, "constructing api client")
}

return cli, nil

@ -108,7 +108,7 @@ func usersNoInfo(ctx context.Context, acct account.Account, errs *fault.Bus) ([]
func UserAssignedLicenses(ctx context.Context, acct account.Account, userID string) (int, error) {
ac, err := makeAC(ctx, acct, path.UnknownService)
if err != nil {
return 0, clues.Stack(err).WithClues(ctx)
return 0, clues.Stack(err)
}

us, err := ac.Users().GetByID(