Fix shadowing lint errors
Rename errs -> bus wherever the variable holds a fault.Bus, so these locals no longer shadow the errs package (still referenced via errs.NotFound).
parent 0ce35f4f62
commit 1ad135bd93
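Below is a minimal, self-contained sketch (illustrative only, not code from this repository) of the pattern the shadow lint flags and that this rename avoids: a local variable named after an imported package hides that package for the rest of the scope. In the diff that follows, the *fault.Bus locals move from errs to bus precisely so that package-qualified references such as errs.NotFound keep resolving to the errs package.

// shadowing_sketch.go: hypothetical example, not part of Corso.
package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("not found")

func main() {
    // Before the rename, a local such as `errs := fault.New(true)` shadowed the
    // imported errs package, so errs.NotFound stopped referring to the package.
    // The same shape with the standard library:
    //
    //     errors := []error{errNotFound} // would shadow the errors package
    //
    // Renaming the local to bus keeps the package name visible.
    bus := []error{errNotFound}

    if errors.Is(bus[0], errNotFound) {
        fmt.Println("errors.Is still resolves to the stdlib package")
    }
}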
@@ -174,7 +174,7 @@ func genericCreateCommand(
 ) error {
     var (
         bIDs []string
-        errs = []error{}
+        bus = []error{}
     )

     for _, discSel := range selectorSet {
@@ -187,7 +187,7 @@ func genericCreateCommand(

         bo, err := r.NewBackupWithLookup(ictx, discSel, ins)
         if err != nil {
-            errs = append(errs, clues.Wrap(err, owner).WithClues(ictx))
+            bus = append(bus, clues.Wrap(err, owner).WithClues(ictx))
             Errf(ictx, "%v\n", err)

             continue
@@ -208,7 +208,7 @@ func genericCreateCommand(
             continue
         }

-        errs = append(errs, clues.Wrap(err, owner).WithClues(ictx))
+        bus = append(bus, clues.Wrap(err, owner).WithClues(ictx))
         Errf(ictx, "%v\n", err)

         continue
@@ -235,10 +235,10 @@ func genericCreateCommand(

     backup.PrintAll(ctx, bups)

-    if len(errs) > 0 {
-        sb := fmt.Sprintf("%d of %d backups failed:\n", len(errs), len(selectorSet))
+    if len(bus) > 0 {
+        sb := fmt.Sprintf("%d of %d backups failed:\n", len(bus), len(selectorSet))

-        for i, e := range errs {
+        for i, e := range bus {
             logger.CtxErr(ctx, e).Errorf("Backup %d of %d failed", i+1, len(selectorSet))
             sb += "∙ " + e.Error() + "\n"
         }
@@ -305,13 +305,13 @@ func genericListCommand(
     defer utils.CloseRepo(ctx, r)

     if len(bID) > 0 {
-        fe, b, errs := r.GetBackupErrors(ctx, bID)
-        if errs.Failure() != nil {
-            if errors.Is(errs.Failure(), errs.NotFound) {
+        fe, b, bus := r.GetBackupErrors(ctx, bID)
+        if bus.Failure() != nil {
+            if errors.Is(bus.Failure(), errs.NotFound) {
                 return Only(ctx, clues.New("No backup exists with the id "+bID))
             }

-            return Only(ctx, clues.Wrap(errs.Failure(), "Failed to list backup id "+bID))
+            return Only(ctx, clues.Wrap(bus.Failure(), "Failed to list backup id "+bID))
         }

         b.Print(ctx)
@@ -367,21 +367,21 @@ func genericDetailsCore(

     sel.Configure(selectors.Config{OnlyMatchItemNames: true})

-    d, _, errs := bg.GetBackupDetails(ctx, backupID)
+    d, _, bus := bg.GetBackupDetails(ctx, backupID)
     // TODO: log/track recoverable errors
-    if errs.Failure() != nil {
-        if errors.Is(errs.Failure(), errs.NotFound) {
+    if bus.Failure() != nil {
+        if errors.Is(bus.Failure(), errs.NotFound) {
             return nil, clues.New("no backup exists with the id " + backupID)
         }

-        return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
+        return nil, clues.Wrap(bus.Failure(), "Failed to get backup details in the repository")
     }

     if opts.SkipReduce {
         return d, nil
     }

-    d, err := sel.Reduce(ctx, d, errs)
+    d, err := sel.Reduce(ctx, d, bus)
     if err != nil {
         return nil, clues.Wrap(err, "filtering backup details to selection")
     }
@@ -137,14 +137,14 @@ func NewUnindexedLazyItem(
     itemGetter ItemDataGetter,
     itemID string,
     modTime time.Time,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) *unindexedLazyItem {
     return &unindexedLazyItem{
         ctx: ctx,
         id: itemID,
         itemGetter: itemGetter,
         modTime: modTime,
-        errs: errs,
+        bus: bus,
     }
 }

@@ -158,7 +158,7 @@ type unindexedLazyItem struct {
     ctx context.Context
     mu sync.Mutex
     id string
-    errs *fault.Bus
+    bus *fault.Bus
     itemGetter ItemDataGetter

     modTime time.Time
@@ -186,7 +186,7 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser {
         i.mu.Lock()
         defer i.mu.Unlock()

-        reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.errs)
+        reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.bus)
         if err != nil {
             return nil, clues.Stack(err)
         }
@@ -233,7 +233,7 @@ func NewLazyItem(
     itemGetter ItemDataGetter,
     itemID string,
     modTime time.Time,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) *lazyItem {
     return &lazyItem{
         unindexedLazyItem: NewUnindexedLazyItem(
@@ -241,7 +241,7 @@ func NewLazyItem(
             itemGetter,
             itemID,
             modTime,
-            errs),
+            bus),
     }
 }

@@ -192,12 +192,12 @@ func (mid *mockItemDataGetter) check(t *testing.T, expectCalled bool) {

 func (mid *mockItemDataGetter) GetData(
     ctx context.Context,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) (io.ReadCloser, *details.ItemInfo, bool, error) {
     mid.getCalled = true

     if mid.err != nil {
-        errs.AddRecoverable(ctx, mid.err)
+        bus.AddRecoverable(ctx, mid.err)
     }

     return mid.reader, mid.info, mid.delInFlight, mid.err
@@ -288,7 +288,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
             ctx, flush := tester.NewContext(t)
             defer flush()

-            errs := fault.New(true)
+            bus := fault.New(true)

             defer test.mid.check(t, true)

@@ -297,7 +297,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
                 test.mid,
                 id,
                 now,
-                errs)
+                bus)

             assert.Equal(t, id, item.ID(), "ID")
             assert.False(t, item.Deleted(), "deleted")
@@ -325,7 +325,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
             _, err = item.Info()
             test.infoErr(t, err, "Info(): %v", clues.ToCore(err))

-            e := errs.Errors()
+            e := bus.Errors()

             if !test.expectBusErr {
                 assert.Nil(t, e.Failure, "hard failure")
@@ -350,12 +350,12 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
     ctx, flush := tester.NewContext(t)
     defer flush()

-    errs := fault.New(true)
+    bus := fault.New(true)

     mid := &mockItemDataGetter{delInFlight: true}
     defer mid.check(t, true)

-    item := data.NewLazyItem(ctx, mid, id, now, errs)
+    item := data.NewLazyItem(ctx, mid, id, now, bus)

     assert.Equal(t, id, item.ID(), "ID")
     assert.False(t, item.Deleted(), "deleted")
@@ -379,7 +379,7 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
     _, err = item.Info()
     assert.ErrorIs(t, err, errs.NotFound, "Info() error")

-    e := errs.Errors()
+    e := bus.Errors()

     assert.Nil(t, e.Failure, "hard failure")
     assert.Empty(t, e.Recovered, "recovered")
@@ -396,12 +396,12 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
     ctx, flush := tester.NewContext(t)
     defer flush()

-    errs := fault.New(true)
+    bus := fault.New(true)

     mid := &mockItemDataGetter{}
     defer mid.check(t, false)

-    item := data.NewLazyItem(ctx, mid, id, now, errs)
+    item := data.NewLazyItem(ctx, mid, id, now, bus)

     assert.Equal(t, id, item.ID(), "ID")
     assert.False(t, item.Deleted(), "deleted")
@@ -93,13 +93,13 @@ type Collection struct {
     AuxItems map[string]data.Item
 }

-func (c Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Item {
+func (c Collection) Items(ctx context.Context, bus *fault.Bus) <-chan data.Item {
     ch := make(chan data.Item)

     go func() {
         defer close(ch)

-        el := errs.Local()
+        el := bus.Local()

         for _, item := range c.ItemData {
             it, ok := item.(*Item)
@@ -113,7 +113,7 @@ func (c Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Item
     }()

     for _, err := range c.ItemsRecoverableErrs {
-        errs.AddRecoverable(ctx, err)
+        bus.AddRecoverable(ctx, err)
     }

     return ch
@@ -207,13 +207,13 @@ type unversionedRestoreCollection struct {

 func (c *unversionedRestoreCollection) Items(
     ctx context.Context,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) <-chan data.Item {
     res := make(chan data.Item)
     go func() {
         defer close(res)

-        for item := range c.RestoreCollection.Items(ctx, errs) {
+        for item := range c.RestoreCollection.Items(ctx, bus) {
             r, err := readers.NewVersionedRestoreReader(item.ToReader())
             require.NoError(c.t, err, clues.ToCore(err))

@@ -249,13 +249,13 @@ type versionedBackupCollection struct {

 func (c *versionedBackupCollection) Items(
     ctx context.Context,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) <-chan data.Item {
     res := make(chan data.Item)
     go func() {
         defer close(res)

-        for item := range c.BackupCollection.Items(ctx, errs) {
+        for item := range c.BackupCollection.Items(ctx, bus) {
             r, err := readers.NewVersionedBackupReader(
                 readers.SerializationFormat{
                     Version: readers.DefaultSerializationVersion,
@@ -31,11 +31,11 @@ type kopiaDataCollection struct {

 func (kdc *kopiaDataCollection) Items(
     ctx context.Context,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) <-chan data.Item {
     var (
         res = make(chan data.Item)
-        el = errs.Local()
+        el = bus.Local()
         loadCount = 0
     )

@@ -64,7 +64,7 @@ func (mc mergeCollection) FullPath() path.Path {

 func (mc *mergeCollection) Items(
     ctx context.Context,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) <-chan data.Item {
     res := make(chan data.Item)

@@ -83,7 +83,7 @@ func (mc *mergeCollection) Items(
                 "merged_collection_storage_path", path.LoggableDir(c.storagePath))
             logger.Ctx(ictx).Debug("sending items from merged collection")

-            for item := range c.Items(ictx, errs) {
+            for item := range c.Items(ictx, bus) {
                 res <- item
             }
         }
@@ -60,7 +60,7 @@ type corsoProgress struct {
     toMerge *mergeDetails
     mu sync.RWMutex
     totalBytes int64
-    errs *fault.Bus
+    bus *fault.Bus
     // expectedIgnoredErrors is a count of error cases caught in the Error wrapper
     // which are well known and actually ignorable. At the end of a run, if the
     // manifest ignored error count is equal to this count, then everything is good.
@@ -109,7 +109,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
     // never had to materialize their details in-memory.
     if d.infoer == nil || d.cached {
         if d.prevPath == nil {
-            cp.errs.AddRecoverable(ctx, clues.New("finished file sourced from previous backup with no previous path").
+            cp.bus.AddRecoverable(ctx, clues.New("finished file sourced from previous backup with no previous path").
                 WithClues(ctx).
                 Label(fault.LabelForceNoBackupCreation))

@@ -125,7 +125,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
             d.repoPath,
             d.locationPath)
         if err != nil {
-            cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to merge list").
+            cp.bus.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to merge list").
                 WithClues(ctx).
                 Label(fault.LabelForceNoBackupCreation))
         }
@@ -139,13 +139,13 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
         // adding it to details since there's no data for it.
         return
     } else if err != nil {
-        cp.errs.AddRecoverable(ctx, clues.Wrap(err, "getting ItemInfo").
+        cp.bus.AddRecoverable(ctx, clues.Wrap(err, "getting ItemInfo").
             WithClues(ctx).
             Label(fault.LabelForceNoBackupCreation))

         return
     } else if !ptr.Val(d.modTime).Equal(info.Modified()) {
-        cp.errs.AddRecoverable(ctx, clues.New("item modTime mismatch").
+        cp.bus.AddRecoverable(ctx, clues.New("item modTime mismatch").
             WithClues(ctx).
             Label(fault.LabelForceNoBackupCreation))

@@ -154,7 +154,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {

     err = cp.deets.Add(d.repoPath, d.locationPath, info)
     if err != nil {
-        cp.errs.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to details").
+        cp.bus.AddRecoverable(ctx, clues.Wrap(err, "adding finished file to details").
             WithClues(ctx).
             Label(fault.LabelForceNoBackupCreation))

@@ -218,7 +218,7 @@ func (cp *corsoProgress) Error(relpath string, err error, isIgnored bool) {

     defer cp.UploadProgress.Error(relpath, err, isIgnored)

-    cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "kopia reported error").
+    cp.bus.AddRecoverable(cp.ctx, clues.Wrap(err, "kopia reported error").
         With("is_ignored", isIgnored, "relative_path", relpath).
         Label(fault.LabelForceNoBackupCreation))
 }
@@ -252,7 +252,7 @@ func collectionEntries(
         // Track which items have already been seen so we can skip them if we see
         // them again in the data from the base snapshot.
         seen = map[string]struct{}{}
-        items = streamedEnts.Items(ctx, progress.errs)
+        items = streamedEnts.Items(ctx, progress.bus)
     )

     if lp, ok := streamedEnts.(data.LocationPather); ok {
@@ -290,7 +290,7 @@ func collectionEntries(
             itemPath, err := streamedEnts.FullPath().AppendItem(e.ID())
             if err != nil {
                 err = clues.Wrap(err, "getting full item path")
-                progress.errs.AddRecoverable(ctx, err)
+                progress.bus.AddRecoverable(ctx, err)

                 logger.CtxErr(ctx, err).Error("getting full item path")

@@ -378,7 +378,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
            deets: bd,
            toMerge: newMergeDetails(),
            pending: map[string]*itemDetails{},
-            errs: fault.New(true),
+            bus: fault.New(true),
        }

        ci := test.cachedItems(suite.targetFileName, suite.targetFilePath)
@@ -476,7 +476,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
        UploadProgress: &snapshotfs.NullUploadProgress{},
        deets: bd,
        pending: map[string]*itemDetails{},
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    for k, v := range cachedItems {
@@ -492,7 +492,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {

    assert.Empty(t, cp.pending)
    assert.Empty(t, bd.Details().Entries)
-    assert.Error(t, cp.errs.Failure(), clues.ToCore(cp.errs.Failure()))
+    assert.Error(t, cp.bus.Failure(), clues.ToCore(cp.bus.Failure()))
 }

 func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarchy() {
@@ -527,7 +527,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
        deets: db,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    deets := &itemDetails{
@@ -569,7 +569,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() {
            UploadProgress: &snapshotfs.NullUploadProgress{},
            deets: bd,
            pending: map[string]*itemDetails{},
-            errs: fault.New(true),
+            bus: fault.New(true),
        }

        ci := test.cachedItems(suite.targetFileName, suite.targetFilePath)
@@ -632,7 +632,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    collections := []data.BackupCollection{
@@ -752,7 +752,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, pmMock.NewPrefixMap(nil), progress)
@@ -859,7 +859,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
    progress := &corsoProgress{
        ctx: ctx,
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    _, err := inflateDirTree(ctx, nil, nil, test.layout, pmMock.NewPrefixMap(nil), progress)
@@ -958,7 +958,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    cols := []data.BackupCollection{}
@@ -1190,7 +1190,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }
    msw := &mockSnapshotWalker{
        snapshotRoot: getBaseSnapshot(),
@@ -1900,7 +1900,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }
    msw := &mockSnapshotWalker{
        snapshotRoot: getBaseSnapshot(),
@@ -2034,7 +2034,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }
    mc := exchMock.NewCollection(suite.testStoragePath, suite.testStoragePath, 1)
    mc.PrevPath = mc.FullPath()
@@ -2131,7 +2131,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase()
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }
    mc := exchMock.NewCollection(archiveStorePath, archiveLocPath, 1)
    mc.ColState = data.NewState
@@ -2353,7 +2353,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    mc := exchMock.NewCollection(inboxPath, inboxPath, 1)
@@ -2509,7 +2509,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }

    mce := exchMock.NewCollection(newPrefixPathEmail, nil, 0)
@@ -3437,7 +3437,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
        ctx: ctx,
        pending: map[string]*itemDetails{},
        toMerge: newMergeDetails(),
-        errs: fault.New(true),
+        bus: fault.New(true),
    }
    snapshotRoot, counters := getBaseSnapshot()
    msw := &mockSnapshotWalker{
@@ -146,7 +146,7 @@ func (w Wrapper) ConsumeBackupCollections(
    globalExcludeSet prefixmatcher.StringSetReader,
    additionalTags map[string]string,
    buildTreeWithBase bool,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
    if w.c == nil {
        return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
@@ -164,7 +164,7 @@ func (w Wrapper) ConsumeBackupCollections(
        pending: map[string]*itemDetails{},
        deets: &details.Builder{},
        toMerge: newMergeDetails(),
-        errs: errs,
+        bus: bus,
    }

    // When running an incremental backup, we need to pass the prior
@@ -217,7 +217,7 @@ func (w Wrapper) ConsumeBackupCollections(
        return nil, nil, nil, err
    }

-    return s, progress.deets, progress.toMerge, progress.errs.Failure()
+    return s, progress.deets, progress.toMerge, progress.bus.Failure()
 }

 func (w Wrapper) makeSnapshotWithRoot(
@@ -476,7 +476,7 @@ func (w Wrapper) ProduceRestoreCollections(
    snapshotID string,
    paths []path.RestorePaths,
    bcounter ByteCounter,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) ([]data.RestoreCollection, error) {
    ctx, end := diagnostics.Span(ctx, "kopia:produceRestoreCollections")
    defer end()
@@ -497,7 +497,7 @@ func (w Wrapper) ProduceRestoreCollections(
        // RestorePath -> []StoragePath directory -> set of items to load from the
        // directory.
        dirsToItems = map[string]*restoreCollection{}
-        el = errs.Local()
+        el = bus.Local()
    )

    for _, itemPaths := range paths {
@@ -552,7 +552,7 @@ func (w Wrapper) ProduceRestoreCollections(

    // Now that we've grouped everything, go through and load each directory and
    // then load the items from the directory.
-    res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs)
+    res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, bus)
    if err != nil {
        return nil, clues.Wrap(err, "loading items").WithClues(ctx)
    }
@@ -1352,7 +1352,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
        },
    }

-    errs := fault.New(true)
+    bus := fault.New(true)

    stats, deets, _, err := suite.w.ConsumeBackupCollections(
        suite.ctx,
@@ -1362,13 +1362,13 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
        nil,
        nil,
        true,
-        errs)
+        bus)
    require.Error(t, err, clues.ToCore(err))
    assert.Equal(t, 0, stats.ErrorCount, "error count")
    assert.Equal(t, 5, stats.TotalFileCount, "total files")
    assert.Equal(t, 6, stats.TotalDirectoryCount, "total directories")
    assert.Equal(t, 0, stats.IgnoredErrorCount, "ignored errors")
-    assert.Equal(t, 1, len(errs.Errors().Recovered), "recovered errors")
+    assert.Equal(t, 1, len(bus.Errors().Recovered), "recovered errors")
    assert.False(t, stats.Incomplete, "incomplete")
    // 5 file and 2 folder entries.
    assert.Len(t, deets.Details().Entries, 5+2)
@@ -1388,8 +1388,8 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {

    require.Len(t, dcs, 1, "number of restore collections")

-    errs = fault.New(true)
-    items := dcs[0].Items(suite.ctx, errs)
+    bus = fault.New(true)
+    items := dcs[0].Items(suite.ctx, bus)

    // Get all the items from channel
    //nolint:revive
@@ -1399,7 +1399,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
    // Files that had an error shouldn't make a dir entry in kopia. If they do we
    // may run into kopia-assisted incrementals issues because only mod time and
    // not file size is checked for StreamingFiles.
-    assert.ErrorIs(t, errs.Failure(), errs.NotFound, "errored file is restorable", clues.ToCore(err))
+    assert.ErrorIs(t, bus.Failure(), errs.NotFound, "errored file is restorable", clues.ToCore(err))
 }

 type backedupFile struct {
@@ -1752,15 +1752,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
            assert.NoError(t, err, "errors producing collection", clues.ToCore(err))
            require.Len(t, dcs, 1, "unexpected number of restore collections")

-            errs := fault.New(true)
-            items := dcs[0].Items(suite.ctx, errs)
+            bus := fault.New(true)
+            items := dcs[0].Items(suite.ctx, bus)

            // Get all the items from channel
            //nolint:revive
            for range items {
            }

-            test.restoreCheck(t, errs.Failure(), errs)
+            test.restoreCheck(t, bus.Failure(), bus)
        })
    }
 }
@@ -1873,19 +1873,19 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
                return
            }

-            errs := fault.New(true)
+            bus := fault.New(true)

            for _, dc := range result {
                // Get all the items from channel
-                items := dc.Items(suite.ctx, errs)
+                items := dc.Items(suite.ctx, bus)
                //nolint:revive
                for range items {
                }
            }

-            test.expectedErr(t, errs.Failure(), errs.Failure(), "getting items")
+            test.expectedErr(t, bus.Failure(), bus.Failure(), "getting items")

-            if errs.Failure() != nil {
+            if bus.Failure() != nil {
                return
            }

@@ -277,7 +277,7 @@ func (suite *CollectionUnitSuite) TestPrefetchCollection_Items() {
        suite.Run(test.name, func() {
            var (
                t = suite.T()
-                errs = fault.New(true)
+                bus = fault.New(true)
                itemCount int
            )

@@ -298,7 +298,7 @@ func (suite *CollectionUnitSuite) TestPrefetchCollection_Items() {
                false,
                statusUpdater)

-            for item := range col.Items(ctx, errs) {
+            for item := range col.Items(ctx, bus) {
                itemCount++

                _, rok := test.removed[item.ID()]
@@ -317,7 +317,7 @@ func (suite *CollectionUnitSuite) TestPrefetchCollection_Items() {
                assert.True(t, aok || rok, "item must be either added or removed: %q", item.ID())
            }

-            assert.NoError(t, errs.Failure())
+            assert.NoError(t, bus.Failure())
            assert.Equal(
                t,
                test.expectItemCount,
@@ -337,10 +337,10 @@ func (mlg *mockLazyItemGetterSerializer) GetItem(
    user string,
    itemID string,
    immutableIDs bool,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
    mlg.callIDs = append(mlg.callIDs, itemID)
-    return mlg.ItemGetSerialize.GetItem(ctx, user, itemID, immutableIDs, errs)
+    return mlg.ItemGetSerialize.GetItem(ctx, user, itemID, immutableIDs, bus)
 }

 func (mlg *mockLazyItemGetterSerializer) check(t *testing.T, expectIDs []string) {
@@ -410,7 +410,7 @@ func (suite *CollectionUnitSuite) TestLazyFetchCollection_Items_LazyFetch() {
        suite.Run(test.name, func() {
            var (
                t = suite.T()
-                errs = fault.New(true)
+                bus = fault.New(true)
                itemCount int
            )

@@ -436,7 +436,7 @@ func (suite *CollectionUnitSuite) TestLazyFetchCollection_Items_LazyFetch() {
                true,
                statusUpdater)

-            for item := range col.Items(ctx, errs) {
+            for item := range col.Items(ctx, bus) {
                itemCount++

                _, rok := test.removed[item.ID()]
@@ -479,7 +479,7 @@ func (suite *CollectionUnitSuite) TestLazyFetchCollection_Items_LazyFetch() {
                assert.True(t, aok || rok, "item must be either added or removed: %q", item.ID())
            }

-            assert.NoError(t, errs.Failure())
+            assert.NoError(t, bus.Failure())
            assert.Equal(
                t,
                test.expectItemCount,
@@ -76,13 +76,13 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
    table := []struct {
        name string
        deets func(*testing.T) *details.Details
-        errs func(context.Context) *fault.Errors
+        bus func(context.Context) *fault.Errors
        hasSnapID assert.ValueAssertionFunc
    }{
        {
            name: "none",
            deets: func(*testing.T) *details.Details { return nil },
-            errs: func(context.Context) *fault.Errors { return nil },
+            bus: func(context.Context) *fault.Errors { return nil },
            hasSnapID: assert.Empty,
        },
        {
@@ -100,13 +100,13 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
                }))
                return deetsBuilder.Details()
            },
-            errs: func(context.Context) *fault.Errors { return nil },
+            bus: func(context.Context) *fault.Errors { return nil },
            hasSnapID: assert.NotEmpty,
        },
        {
            name: "errors",
            deets: func(*testing.T) *details.Details { return nil },
-            errs: func(ctx context.Context) *fault.Errors {
+            bus: func(ctx context.Context) *fault.Errors {
                bus := fault.New(false)
                bus.Fail(clues.New("foo"))
                bus.AddRecoverable(ctx, clues.New("bar"))
@@ -136,7 +136,7 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {

                return deetsBuilder.Details()
            },
-            errs: func(ctx context.Context) *fault.Errors {
+            bus: func(ctx context.Context) *fault.Errors {
                bus := fault.New(false)
                bus.Fail(clues.New("foo"))
                bus.AddRecoverable(ctx, clues.New("bar"))
@@ -169,9 +169,9 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
                require.NoError(t, err)
            }

-            errs := test.errs(ctx)
-            if errs != nil {
-                err = ss.Collect(ctx, FaultErrorsCollector(errs))
+            bus := test.bus(ctx)
+            if bus != nil {
+                err = ss.Collect(ctx, FaultErrorsCollector(bus))
                require.NoError(t, err)
            }

@@ -212,7 +212,7 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
            }

            var readErrs fault.Errors
-            if errs != nil {
+            if bus != nil {
                err = ss.Read(
                    ctx,
                    snapid,
@@ -221,8 +221,8 @@ func (suite *StreamStoreIntgSuite) TestStreamer() {
                require.NoError(t, err)
                require.NotEmpty(t, readErrs)

-                assert.ElementsMatch(t, errs.Skipped, readErrs.Skipped)
-                assert.ElementsMatch(t, errs.Recovered, readErrs.Recovered)
+                assert.ElementsMatch(t, bus.Skipped, readErrs.Skipped)
+                assert.ElementsMatch(t, bus.Recovered, readErrs.Recovered)
            } else {
                err := ss.Read(
                    ctx,
@@ -120,7 +120,7 @@ func getBackup(
 func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) {
    var (
        bups []*backup.Backup
-        errs = fault.New(false)
+        bus = fault.New(false)
        sw = store.NewWrapper(r.modelStore)
    )

@@ -129,13 +129,13 @@ func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup

        b, err := sw.GetBackup(ictx, model.StableID(id))
        if err != nil {
-            errs.AddRecoverable(ctx, errWrapper(err))
+            bus.AddRecoverable(ctx, errWrapper(err))
        }

        bups = append(bups, b)
    }

-    return bups, errs
+    return bups, bus
 }

 // BackupsByTag lists all backups in a repository that contain all the tags
@@ -177,7 +177,7 @@ func (r repository) GetBackupDetails(
    ctx context.Context,
    backupID string,
 ) (*details.Details, *backup.Backup, *fault.Bus) {
-    errs := fault.New(false)
+    bus := fault.New(false)

    deets, bup, err := getBackupDetails(
        ctx,
@@ -185,9 +185,9 @@ func (r repository) GetBackupDetails(
        r.Account.ID(),
        r.dataLayer,
        store.NewWrapper(r.modelStore),
-        errs)
+        bus)

-    return deets, bup, errs.Fail(err)
+    return deets, bup, bus.Fail(err)
 }

 // getBackupDetails handles the processing for GetBackupDetails.
@@ -196,7 +196,7 @@ func getBackupDetails(
    backupID, tenantID string,
    kw *kopia.Wrapper,
    sw store.BackupGetter,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) (*details.Details, *backup.Backup, error) {
    b, err := sw.GetBackup(ctx, model.StableID(backupID))
    if err != nil {
@@ -221,7 +221,7 @@ func getBackupDetails(
        ctx,
        ssid,
        streamstore.DetailsReader(details.UnmarshalTo(&deets)),
-        errs)
+        bus)
    if err != nil {
        return nil, nil, err
    }
@@ -247,7 +247,7 @@ func (r repository) GetBackupErrors(
    ctx context.Context,
    backupID string,
 ) (*fault.Errors, *backup.Backup, *fault.Bus) {
-    errs := fault.New(false)
+    bus := fault.New(false)

    fe, bup, err := getBackupErrors(
        ctx,
@@ -255,9 +255,9 @@ func (r repository) GetBackupErrors(
        r.Account.ID(),
        r.dataLayer,
        store.NewWrapper(r.modelStore),
-        errs)
+        bus)

-    return fe, bup, errs.Fail(err)
+    return fe, bup, bus.Fail(err)
 }

 // getBackupErrors handles the processing for GetBackupErrors.
@@ -266,7 +266,7 @@ func getBackupErrors(
    backupID, tenantID string,
    kw *kopia.Wrapper,
    sw store.BackupGetter,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) (*fault.Errors, *backup.Backup, error) {
    b, err := sw.GetBackup(ctx, model.StableID(backupID))
    if err != nil {
@@ -287,7 +287,7 @@ func getBackupErrors(
        ctx,
        ssid,
        streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)),
-        errs)
+        bus)
    if err != nil {
        return nil, nil, err
    }
@@ -793,7 +793,7 @@ func writeBackup(
    ownerID, ownerName string,
    deets *details.Details,
    fe *fault.Errors,
-    errs *fault.Bus,
+    bus *fault.Bus,
 ) *backup.Backup {
    var (
        serv = sel.PathService()
@@ -806,7 +806,7 @@ func writeBackup(
    err = sstore.Collect(ctx, streamstore.FaultErrorsCollector(fe))
    require.NoError(t, err, "collecting errors in streamstore")

-    ssid, err := sstore.Write(ctx, errs)
+    ssid, err := sstore.Write(ctx, bus)
    require.NoError(t, err, "writing to streamstore")

    tags := map[string]string{
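For readers unfamiliar with the bus pattern exercised above, here is a small stand-alone sketch of the collect-and-continue flow; the Bus type below is a hypothetical stand-in exposing only the calls seen in this diff (AddRecoverable, Fail, Failure), not the real fault package API.

// bus_sketch.go: hypothetical example, not part of Corso.
package main

import (
    "context"
    "errors"
    "fmt"
)

// Bus mimics the shape used above: recoverable errors are accumulated while
// processing continues, and Failure reports a hard stop.
type Bus struct {
    failure   error
    recovered []error
}

func (b *Bus) AddRecoverable(_ context.Context, err error) { b.recovered = append(b.recovered, err) }
func (b *Bus) Fail(err error) *Bus                         { b.failure = err; return b }
func (b *Bus) Failure() error                              { return b.failure }

func processItems(ctx context.Context, ids []string, bus *Bus) {
    for _, id := range ids {
        if id == "" {
            // Per-item problem: record it on the bus and keep going,
            // mirroring the Items loops in the collections above.
            bus.AddRecoverable(ctx, errors.New("empty item id"))
            continue
        }

        fmt.Println("processed", id)
    }
}

func main() {
    bus := &Bus{}
    processItems(context.Background(), []string{"a", "", "b"}, bus)

    if bus.Failure() != nil {
        fmt.Println("hard failure:", bus.Failure())
        return
    }

    fmt.Printf("%d recoverable error(s) collected\n", len(bus.recovered))
}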