normalize observe usage, ensure deferred closers (#5006)

some housecleaning before reviewing deadlock issues

---

#### Does this PR need a docs update or release note?

- [x]  No

#### Type of change

- [x] 🧹 Tech Debt/Cleanup

#### Test Plan

- [x]  Unit test
This commit is contained in:
Keepers 2024-01-11 15:42:10 -07:00 committed by GitHub
parent ad783172b7
commit 279a6dab3c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 182 additions and 161 deletions

View File

@ -13,6 +13,7 @@ import (
"github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/internal/operations"
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/dttm" "github.com/alcionai/corso/src/pkg/dttm"
"github.com/alcionai/corso/src/pkg/export" "github.com/alcionai/corso/src/pkg/export"
@ -96,7 +97,7 @@ func runExport(
return Only(ctx, clues.Wrap(err, "Failed to initialize "+serviceName+" export")) return Only(ctx, clues.Wrap(err, "Failed to initialize "+serviceName+" export"))
} }
expColl, err := eo.Run(ctx) collections, err := eo.Run(ctx)
if err != nil { if err != nil {
if errors.Is(err, data.ErrNotFound) { if errors.Is(err, data.ErrNotFound) {
return Only(ctx, clues.New("Backup or backup details missing for id "+backupID)) return Only(ctx, clues.New("Backup or backup details missing for id "+backupID))
@ -105,20 +106,8 @@ func runExport(
return Only(ctx, clues.Wrap(err, "Failed to run "+serviceName+" export")) return Only(ctx, clues.Wrap(err, "Failed to run "+serviceName+" export"))
} }
// It would be better to give a progressbar than a spinner, but we if err = showExportProgress(ctx, eo, collections, exportLocation); err != nil {
// have any way of knowing how many files are available as of now. return err
diskWriteComplete := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Writing data to disk")
err = export.ConsumeExportCollections(ctx, exportLocation, expColl, eo.Errors)
// The progressbar has to be closed before we move on as the Infof
// below flushes progressbar to prevent clobbering the output and
// that causes the entire export operation to stall indefinitely.
// https://github.com/alcionai/corso/blob/8102523dc62c001b301cd2ab4e799f86146ab1a0/src/cli/print/print.go#L151
close(diskWriteComplete)
if err != nil {
return Only(ctx, err)
} }
if len(eo.Errors.Recovered()) > 0 { if len(eo.Errors.Recovered()) > 0 {
@ -142,3 +131,23 @@ func runExport(
return nil return nil
} }
// showExportProgress is a slim wrapper that lets us defer the progress bar
// closure with the expected scope: the bar is guaranteed to be closed when
// this helper returns, before the caller prints anything else.
func showExportProgress(
	ctx context.Context,
	op operations.ExportOperation,
	collections []export.Collectioner,
	exportLocation string,
) error {
	// A progress bar would be preferable to a spinner, but we don't have
	// any way of knowing how many files are available as of now.
	progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Writing data to disk")
	defer close(progressMessage)

	if err := export.ConsumeExportCollections(ctx, exportLocation, collections, op.Errors); err != nil {
		return Only(ctx, err)
	}

	return nil
}

View File

@ -462,14 +462,12 @@ func (oc *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
return return
} }
displayPath := oc.handler.FormatDisplayPath(oc.driveName, parentPath) progressMessage := observe.ProgressWithCount(
folderProgress := observe.ProgressWithCount(
ctx, ctx,
observe.ItemQueueMsg, observe.ItemQueueMsg,
path.NewElements(displayPath), path.NewElements(oc.handler.FormatDisplayPath(oc.driveName, parentPath)),
int64(len(oc.driveItems))) int64(len(oc.driveItems)))
defer close(folderProgress) defer close(progressMessage)
semaphoreCh := make(chan struct{}, graph.Parallelism(path.OneDriveService).Item()) semaphoreCh := make(chan struct{}, graph.Parallelism(path.OneDriveService).Item())
defer close(semaphoreCh) defer close(semaphoreCh)
@ -500,7 +498,7 @@ func (oc *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
oc.ctrl.ItemExtensionFactory, oc.ctrl.ItemExtensionFactory,
errs) errs)
folderProgress <- struct{}{} progressMessage <- struct{}{}
}(item) }(item)
} }
@ -546,7 +544,7 @@ func (lig *lazyItemGetter) GetData(
lig.info.Extension.Data = extData.Data lig.info.Extension.Data = extData.Data
// display/log the item download // display/log the item download
progReader, _ := observe.ItemProgress( progReader := observe.ItemProgress(
ctx, ctx,
extRc, extRc,
observe.ItemBackupMsg, observe.ItemBackupMsg,
@ -656,7 +654,7 @@ func (oc *Collection) streamDriveItem(
} }
metaReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { metaReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
progReader, _ := observe.ItemProgress( progReader := observe.ItemProgress(
ctx, ctx,
itemMeta, itemMeta,
observe.ItemBackupMsg, observe.ItemBackupMsg,

View File

@ -796,8 +796,7 @@ func restoreFile(
var ( var (
written int64 written int64
progReader io.ReadCloser progressReader io.ReadCloser
closeProgressBar func()
) )
// This is just to retry file upload, the uploadSession creation is // This is just to retry file upload, the uploadSession creation is
@ -824,21 +823,23 @@ func restoreFile(
iReader = itemData.ToReader() iReader = itemData.ToReader()
} }
progReader, closeProgressBar = observe.ItemProgress( progressReader = observe.ItemProgress(
ctx, ctx,
iReader, iReader,
observe.ItemRestoreMsg, observe.ItemRestoreMsg,
clues.Hide(pname), clues.Hide(pname),
ss.Size()) ss.Size())
defer progressReader.Close()
// Upload the stream data // Upload the stream data
written, err = io.CopyBuffer(w, progReader, copyBuffer) written, err = io.CopyBuffer(w, progressReader, copyBuffer)
if err == nil { if err == nil {
break break
} }
// clear out the progress bar immediately on error // close the progress bar immediately on error, else we might deadlock
closeProgressBar() // it's safe to double-call Close on the reader.
progressReader.Close()
// refresh the io.Writer to restart the upload // refresh the io.Writer to restart the upload
// TODO: @vkamra verify if var session is the desired input // TODO: @vkamra verify if var session is the desired input
@ -853,8 +854,6 @@ func restoreFile(
return "", details.ItemInfo{}, clues.Wrap(err, "uploading file") return "", details.ItemInfo{}, clues.Wrap(err, "uploading file")
} }
defer closeProgressBar()
dii := ir.AugmentItemInfo( dii := ir.AugmentItemInfo(
details.ItemInfo{}, details.ItemInfo{},
rcc.ProtectedResource, rcc.ProtectedResource,

View File

@ -60,16 +60,14 @@ func CreateCollections(
return nil, clues.NewWC(ctx, "unsupported backup category type") return nil, clues.NewWC(ctx, "unsupported backup category type")
} }
pcfg := observe.ProgressCfg{ progressMessage := observe.MessageWithCompletion(
ctx,
observe.ProgressCfg{
Indent: 1, Indent: 1,
CompletionMessage: func() string { return fmt.Sprintf("(found %d folders)", len(collections)) }, CompletionMessage: func() string { return fmt.Sprintf("(found %d folders)", len(collections)) },
} },
foldersComplete := observe.MessageWithCompletion(
ctx,
pcfg,
qp.Category.HumanString()) qp.Category.HumanString())
defer close(progressMessage)
defer close(foldersComplete)
rootFolder, cc := handler.NewContainerCache(bpc.ProtectedResource.ID()) rootFolder, cc := handler.NewContainerCache(bpc.ProtectedResource.ID())

View File

@ -191,8 +191,7 @@ func (col *prefetchCollection) streamItems(
success int64 success int64
totalBytes int64 totalBytes int64
wg sync.WaitGroup wg sync.WaitGroup
colProgress chan<- struct{} progressMessage chan<- struct{}
user = col.user user = col.user
) )
@ -216,11 +215,11 @@ func (col *prefetchCollection) streamItems(
}() }()
if len(col.added)+len(col.removed) > 0 { if len(col.added)+len(col.removed) > 0 {
colProgress = observe.CollectionProgress( progressMessage = observe.CollectionProgress(
ctx, ctx,
col.Category().HumanString(), col.Category().HumanString(),
col.LocationPath().Elements()) col.LocationPath().Elements())
defer close(colProgress) defer close(progressMessage)
} }
semaphoreCh := make(chan struct{}, col.Opts().Parallelism.ItemFetch) semaphoreCh := make(chan struct{}, col.Opts().Parallelism.ItemFetch)
@ -244,8 +243,8 @@ func (col *prefetchCollection) streamItems(
atomic.AddInt64(&success, 1) atomic.AddInt64(&success, 1)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
}(id) }(id)
} }
@ -333,8 +332,8 @@ func (col *prefetchCollection) streamItems(
atomic.AddInt64(&success, 1) atomic.AddInt64(&success, 1)
atomic.AddInt64(&totalBytes, info.Size) atomic.AddInt64(&totalBytes, info.Size)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
}(id) }(id)
} }
@ -390,7 +389,7 @@ func (col *lazyFetchCollection) streamItems(
) { ) {
var ( var (
success int64 success int64
colProgress chan<- struct{} progressMessage chan<- struct{}
user = col.user user = col.user
) )
@ -408,11 +407,11 @@ func (col *lazyFetchCollection) streamItems(
}() }()
if len(col.added)+len(col.removed) > 0 { if len(col.added)+len(col.removed) > 0 {
colProgress = observe.CollectionProgress( progressMessage = observe.CollectionProgress(
ctx, ctx,
col.Category().HumanString(), col.Category().HumanString(),
col.LocationPath().Elements()) col.LocationPath().Elements())
defer close(colProgress) defer close(progressMessage)
} }
// delete all removed items // delete all removed items
@ -421,8 +420,8 @@ func (col *lazyFetchCollection) streamItems(
atomic.AddInt64(&success, 1) atomic.AddInt64(&success, 1)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
} }
@ -458,8 +457,8 @@ func (col *lazyFetchCollection) streamItems(
atomic.AddInt64(&success, 1) atomic.AddInt64(&success, 1)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
} }
} }

View File

@ -47,11 +47,11 @@ func RestoreCollection(
category = fullPath.Category() category = fullPath.Category()
) )
colProgress := observe.CollectionProgress( progressMessage := observe.CollectionProgress(
ctx, ctx,
category.HumanString(), category.HumanString(),
fullPath.Folder(false)) fullPath.Folder(false))
defer close(colProgress) defer close(progressMessage)
for { for {
select { select {
@ -119,7 +119,7 @@ func RestoreCollection(
logger.Ctx(ictx).Infow("accounting for restored item", "error", err) logger.Ctx(ictx).Infow("accounting for restored item", "error", err)
} }
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
} }
} }

View File

@ -131,7 +131,7 @@ func (col *prefetchCollection[C, I]) streamItems(ctx context.Context, errs *faul
streamedItems int64 streamedItems int64
totalBytes int64 totalBytes int64
wg sync.WaitGroup wg sync.WaitGroup
colProgress chan<- struct{} progressMessage chan<- struct{}
el = errs.Local() el = errs.Local()
) )
@ -154,11 +154,11 @@ func (col *prefetchCollection[C, I]) streamItems(ctx context.Context, errs *faul
}() }()
if len(col.added)+len(col.removed) > 0 { if len(col.added)+len(col.removed) > 0 {
colProgress = observe.CollectionProgress( progressMessage = observe.CollectionProgress(
ctx, ctx,
col.Category().HumanString(), col.Category().HumanString(),
col.LocationPath().Elements()) col.LocationPath().Elements())
defer close(colProgress) defer close(progressMessage)
} }
semaphoreCh := make(chan struct{}, col.Opts().Parallelism.ItemFetch) semaphoreCh := make(chan struct{}, col.Opts().Parallelism.ItemFetch)
@ -179,8 +179,8 @@ func (col *prefetchCollection[C, I]) streamItems(ctx context.Context, errs *faul
atomic.AddInt64(&streamedItems, 1) atomic.AddInt64(&streamedItems, 1)
col.Counter.Inc(count.StreamItemsRemoved) col.Counter.Inc(count.StreamItemsRemoved)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
}(id) }(id)
} }
@ -254,8 +254,8 @@ func (col *prefetchCollection[C, I]) streamItems(ctx context.Context, errs *faul
col.Counter.Add(count.StreamBytesAdded, info.Size) col.Counter.Add(count.StreamBytesAdded, info.Size)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
}(id) }(id)
} }
@ -296,7 +296,7 @@ func (col *lazyFetchCollection[C, I]) streamItems(ctx context.Context, errs *fau
var ( var (
streamedItems int64 streamedItems int64
wg sync.WaitGroup wg sync.WaitGroup
colProgress chan<- struct{} progressMessage chan<- struct{}
el = errs.Local() el = errs.Local()
) )
@ -319,11 +319,11 @@ func (col *lazyFetchCollection[C, I]) streamItems(ctx context.Context, errs *fau
}() }()
if len(col.added)+len(col.removed) > 0 { if len(col.added)+len(col.removed) > 0 {
colProgress = observe.CollectionProgress( progressMessage = observe.CollectionProgress(
ctx, ctx,
col.Category().HumanString(), col.Category().HumanString(),
col.LocationPath().Elements()) col.LocationPath().Elements())
defer close(colProgress) defer close(progressMessage)
} }
semaphoreCh := make(chan struct{}, col.Opts().Parallelism.ItemFetch) semaphoreCh := make(chan struct{}, col.Opts().Parallelism.ItemFetch)
@ -344,8 +344,8 @@ func (col *lazyFetchCollection[C, I]) streamItems(ctx context.Context, errs *fau
atomic.AddInt64(&streamedItems, 1) atomic.AddInt64(&streamedItems, 1)
col.Counter.Inc(count.StreamItemsRemoved) col.Counter.Inc(count.StreamItemsRemoved)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
}(id) }(id)
} }
@ -386,8 +386,8 @@ func (col *lazyFetchCollection[C, I]) streamItems(ctx context.Context, errs *fau
atomic.AddInt64(&streamedItems, 1) atomic.AddInt64(&streamedItems, 1)
if colProgress != nil { if progressMessage != nil {
colProgress <- struct{}{} progressMessage <- struct{}{}
} }
}(id, modTime) }(id, modTime)
} }

View File

@ -58,12 +58,14 @@ func CollectLibraries(
path.LibrariesCategory.HumanString(), path.LibrariesCategory.HumanString(),
stdpath.Base(bpc.ProtectedResource.Name())) stdpath.Base(bpc.ProtectedResource.Name()))
pcfg := observe.ProgressCfg{ progressMessage := observe.MessageWithCompletion(
ctx,
observe.ProgressCfg{
Indent: 1, Indent: 1,
CompletionMessage: func() string { return fmt.Sprintf("(found %d items)", colls.NumItems) }, CompletionMessage: func() string { return fmt.Sprintf("(found %d items)", colls.NumItems) },
} },
progressBar := observe.MessageWithCompletion(ctx, pcfg, msg) msg)
close(progressBar) close(progressMessage)
odcs, canUsePreviousBackup, err := colls.Get(ctx, bpc.MetadataCollections, ssmb, errs) odcs, canUsePreviousBackup, err := colls.Get(ctx, bpc.MetadataCollections, ssmb, errs)
if err != nil { if err != nil {

View File

@ -184,9 +184,11 @@ func (pc *prefetchCollection) streamLists(
pc.fullPath, pc.fullPath,
&metrics) &metrics)
// TODO: Insert correct ID for CollectionProgress progressMessage := observe.CollectionProgress(
progress := observe.CollectionProgress(ctx, pc.fullPath.Category().HumanString(), pc.fullPath.Folders()) ctx,
defer close(progress) pc.fullPath.Category().HumanString(),
pc.fullPath.Folders())
defer close(progressMessage)
semaphoreCh := make(chan struct{}, fetchChannelSize) semaphoreCh := make(chan struct{}, fetchChannelSize)
defer close(semaphoreCh) defer close(semaphoreCh)
@ -204,7 +206,7 @@ func (pc *prefetchCollection) streamLists(
go pc.handleListItems( go pc.handleListItems(
ctx, ctx,
semaphoreCh, semaphoreCh,
progress, progressMessage,
&wg, &wg,
listID, listID,
&objects, &objects,
@ -238,8 +240,11 @@ func (pc *prefetchCollection) streamPages(
&metrics) &metrics)
// TODO: Insert correct ID for CollectionProgress // TODO: Insert correct ID for CollectionProgress
progress := observe.CollectionProgress(ctx, pc.fullPath.Category().HumanString(), pc.fullPath.Folders()) progressMessage := observe.CollectionProgress(
defer close(progress) ctx,
pc.fullPath.Category().HumanString(),
pc.fullPath.Folders())
defer close(progressMessage)
betaService := pc.betaService betaService := pc.betaService
if betaService == nil { if betaService == nil {
@ -299,7 +304,7 @@ func (pc *prefetchCollection) streamPages(
} }
pc.stream[path.PagesCategory] <- item pc.stream[path.PagesCategory] <- item
progress <- struct{}{} progressMessage <- struct{}{}
} }
} }
@ -453,8 +458,11 @@ func (lc *lazyFetchCollection) streamItems(
lc.fullPath, lc.fullPath,
&metrics) &metrics)
progress := observe.CollectionProgress(ctx, lc.fullPath.Category().HumanString(), lc.fullPath.Folders()) progressMessage := observe.CollectionProgress(
defer close(progress) ctx,
lc.fullPath.Category().HumanString(),
lc.fullPath.Folders())
defer close(progressMessage)
for listID, modTime := range lc.items { for listID, modTime := range lc.items {
if el.Failure() != nil { if el.Failure() != nil {
@ -475,7 +483,7 @@ func (lc *lazyFetchCollection) streamItems(
metrics.Successes++ metrics.Successes++
progress <- struct{}{} progressMessage <- struct{}{}
} }
metrics.Objects += int(numLists) metrics.Objects += int(numLists)

View File

@ -264,14 +264,15 @@ func backupChannels(
canUsePreviousBackup bool canUsePreviousBackup bool
) )
pcfg := observe.ProgressCfg{ progressMessage := observe.MessageWithCompletion(
ctx,
observe.ProgressCfg{
Indent: 1, Indent: 1,
// TODO(meain): Use number of messages and not channels // TODO(meain): Use number of messages and not channels
CompletionMessage: func() string { return fmt.Sprintf("(found %d channels)", len(colls)) }, CompletionMessage: func() string { return fmt.Sprintf("(found %d channels)", len(colls)) },
} },
progressBar := observe.MessageWithCompletion(ctx, pcfg, scope.Category().PathType().HumanString()) scope.Category().PathType().HumanString())
defer close(progressMessage)
defer close(progressBar)
if !api.IsTeam(ctx, bc.group) { if !api.IsTeam(ctx, bc.group) {
return colls, nil return colls, nil
@ -325,13 +326,14 @@ func backupConversations(
colls []data.BackupCollection colls []data.BackupCollection
) )
pcfg := observe.ProgressCfg{ progressMessage := observe.MessageWithCompletion(
ctx,
observe.ProgressCfg{
Indent: 1, Indent: 1,
CompletionMessage: func() string { return fmt.Sprintf("(found %d conversations)", len(colls)) }, CompletionMessage: func() string { return fmt.Sprintf("(found %d conversations)", len(colls)) },
} },
progressBar := observe.MessageWithCompletion(ctx, pcfg, scope.Category().PathType().HumanString()) scope.Category().PathType().HumanString())
defer close(progressMessage)
defer close(progressBar)
useLazyReader := !bc.producerConfig.Options.ToggleFeatures.DisableLazyItemReader useLazyReader := !bc.producerConfig.Options.ToggleFeatures.DisableLazyItemReader

View File

@ -62,13 +62,14 @@ func ProduceBackupCollections(
bpc.Options, bpc.Options,
counter) counter)
pcfg := observe.ProgressCfg{ progressMessage := observe.MessageWithCompletion(
ctx,
observe.ProgressCfg{
Indent: 1, Indent: 1,
CompletionMessage: func() string { return fmt.Sprintf("(found %d files)", nc.NumFiles) }, CompletionMessage: func() string { return fmt.Sprintf("(found %d files)", nc.NumFiles) },
} },
progressBar := observe.MessageWithCompletion(ctx, pcfg, path.FilesCategory.HumanString()) path.FilesCategory.HumanString())
defer close(progressMessage)
defer close(progressBar)
odcs, canUsePreviousBackup, err = nc.Get(ctx, bpc.MetadataCollections, ssmb, errs) odcs, canUsePreviousBackup, err = nc.Get(ctx, bpc.MetadataCollections, ssmb, errs)
if err != nil { if err != nil {

View File

@ -178,6 +178,10 @@ type ProgressCfg struct {
CompletionMessage func() string CompletionMessage func() string
} }
// DefaultCfg returns a zero-valued ProgressCfg, for callers that want the
// standard progress display with no custom indentation or completion message.
func DefaultCfg() ProgressCfg {
	return ProgressCfg{}
}
// Message is used to display a progress message // Message is used to display a progress message
func Message(ctx context.Context, cfg ProgressCfg, msgs ...any) { func Message(ctx context.Context, cfg ProgressCfg, msgs ...any) {
var ( var (
@ -351,10 +355,12 @@ func (ac *autoCloser) Read(p []byte) (n int, err error) {
} }
func (ac *autoCloser) Close() error { func (ac *autoCloser) Close() error {
if !ac.closed { if ac.closed {
return nil
}
ac.closed = true ac.closed = true
ac.close() ac.close()
}
return ac.rc.Close() return ac.rc.Close()
} }
@ -362,15 +368,16 @@ func (ac *autoCloser) Close() error {
// ItemProgress tracks the display of an item in a folder by counting the bytes // ItemProgress tracks the display of an item in a folder by counting the bytes
// read through the provided readcloser, up until the byte count matches // read through the provided readcloser, up until the byte count matches
// the totalBytes. // the totalBytes.
//
// The progress bar will close automatically when the reader closes. If an early // The progress bar will close automatically when the reader closes. If an early
// close is needed due to abort or other issue, the returned func can be used. // close is needed due to abort or other issue, the reader can be closed manually.
func ItemProgress( func ItemProgress(
ctx context.Context, ctx context.Context,
rc io.ReadCloser, rc io.ReadCloser,
header string, header string,
iname any, iname any,
totalBytes int64, totalBytes int64,
) (io.ReadCloser, func()) { ) io.ReadCloser {
var ( var (
obs = getObserver(ctx) obs = getObserver(ctx)
plain = plainString(iname) plain = plainString(iname)
@ -383,7 +390,7 @@ func ItemProgress(
if obs.hidden() || rc == nil { if obs.hidden() || rc == nil {
defer log.Debug("done - " + header) defer log.Debug("done - " + header)
return rc, func() {} return rc
} }
obs.wg.Add(1) obs.wg.Add(1)
@ -410,22 +417,24 @@ func ItemProgress(
closer := &autoCloser{rc: bar.ProxyReader(rc)} closer := &autoCloser{rc: bar.ProxyReader(rc)}
closer.close = func() { closer.close = func() {
closer.closed = true
bar.SetTotal(-1, true) bar.SetTotal(-1, true)
bar.Abort(true) bar.Abort(true)
} }
return closer, closer.close return closer
} }
// ItemSpinner is similar to ItemProgress, but for use in cases where // ItemSpinner is similar to ItemProgress, but for use in cases where
// we don't know the file size but want to show progress. // we don't know the file size but want to show progress.
//
// The progress bar will close automatically when the reader closes. If an early
// close is needed due to abort or other issue, the reader can be closed manually.
func ItemSpinner( func ItemSpinner(
ctx context.Context, ctx context.Context,
rc io.ReadCloser, rc io.ReadCloser,
header string, header string,
iname any, iname any,
) (io.ReadCloser, func()) { ) io.ReadCloser {
var ( var (
obs = getObserver(ctx) obs = getObserver(ctx)
plain = plainString(iname) plain = plainString(iname)
@ -436,7 +445,7 @@ func ItemSpinner(
if obs.hidden() || rc == nil { if obs.hidden() || rc == nil {
defer log.Debug("done - " + header) defer log.Debug("done - " + header)
return rc, func() {} return rc
} }
obs.wg.Add(1) obs.wg.Add(1)
@ -459,12 +468,14 @@ func ItemSpinner(
log.Debug("done - " + header) log.Debug("done - " + header)
})() })()
abort := func() { closer := &autoCloser{rc: bar.ProxyReader(rc)}
closer.close = func() {
bar.SetTotal(-1, true) bar.SetTotal(-1, true)
bar.Abort(true) bar.Abort(true)
} }
return bar.ProxyReader(rc), abort return closer
} }
// ProgressWithCount tracks the display of a bar that tracks the completion // ProgressWithCount tracks the display of a bar that tracks the completion

View File

@ -45,14 +45,13 @@ func (suite *ObserveProgressUnitSuite) TestObserve_ItemProgress() {
ctx = SeedObserver(ctx, &recorder, config{}) ctx = SeedObserver(ctx, &recorder, config{})
from := make([]byte, 100) from := make([]byte, 100)
prog, abort := ItemProgress( prog := ItemProgress(
ctx, ctx,
io.NopCloser(bytes.NewReader(from)), io.NopCloser(bytes.NewReader(from)),
"folder", "folder",
tst, tst,
100) 100)
require.NotNil(t, prog) require.NotNil(t, prog)
require.NotNil(t, abort)
var i int var i int

View File

@ -522,8 +522,8 @@ func produceBackupDataCollections(
counter *count.Bus, counter *count.Bus,
errs *fault.Bus, errs *fault.Bus,
) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) { ) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
progressBar := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Discovering items to backup") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Discovering items to backup")
defer close(progressBar) defer close(progressMessage)
bpc := inject.BackupProducerConfig{ bpc := inject.BackupProducerConfig{
LastBackupVersion: lastBackupVersion, LastBackupVersion: lastBackupVersion,
@ -559,8 +559,8 @@ func consumeBackupCollections(
"collection_source", "operations", "collection_source", "operations",
"snapshot_type", "item data") "snapshot_type", "item data")
progressBar := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Backing up data") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Backing up data")
defer close(progressBar) defer close(progressMessage)
tags := map[string]string{ tags := map[string]string{
kopia.TagBackupID: string(backupID), kopia.TagBackupID: string(backupID),

View File

@ -249,16 +249,14 @@ func (op *ExportOperation) do(
observe.ProgressCfg{}, observe.ProgressCfg{},
fmt.Sprintf("Discovered %d items in backup %s to export", len(paths), op.BackupID)) fmt.Sprintf("Discovered %d items in backup %s to export", len(paths), op.BackupID))
kopiaComplete := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Enumerating items in repository") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Enumerating items in repository")
defer close(kopiaComplete) defer close(progressMessage)
dcs, err := op.kopia.ProduceRestoreCollections(ctx, bup.SnapshotID, paths, opStats.bytesRead, op.Errors) dcs, err := op.kopia.ProduceRestoreCollections(ctx, bup.SnapshotID, paths, opStats.bytesRead, op.Errors)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "producing collections to export") return nil, clues.Wrap(err, "producing collections to export")
} }
kopiaComplete <- struct{}{}
ctx = clues.Add(ctx, "coll_count", len(dcs)) ctx = clues.Add(ctx, "coll_count", len(dcs))
// should always be 1, since backups are 1:1 with resourceOwners. // should always be 1, since backups are 1:1 with resourceOwners.
@ -340,11 +338,8 @@ func produceExportCollections(
exportStats *metrics.ExportStats, exportStats *metrics.ExportStats,
errs *fault.Bus, errs *fault.Bus,
) ([]export.Collectioner, error) { ) ([]export.Collectioner, error) {
complete := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Preparing export") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Preparing export")
defer func() { defer close(progressMessage)
complete <- struct{}{}
close(complete)
}()
ctx, end := diagnostics.Span(ctx, "m365:export") ctx, end := diagnostics.Span(ctx, "m365:export")
defer end() defer end()

View File

@ -282,8 +282,8 @@ func (op *RestoreOperation) do(
observe.ProgressCfg{}, observe.ProgressCfg{},
fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID)) fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID))
progressBar := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Enumerating items in repository") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Enumerating items in repository")
defer close(progressBar) defer close(progressMessage)
dcs, err := op.kopia.ProduceRestoreCollections( dcs, err := op.kopia.ProduceRestoreCollections(
ctx, ctx,
@ -394,8 +394,8 @@ func consumeRestoreCollections(
return nil, nil, clues.New("no data collections to restore") return nil, nil, clues.New("no data collections to restore")
} }
progressBar := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Restoring data") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Restoring data")
defer close(progressBar) defer close(progressMessage)
ctx, end := diagnostics.Span(ctx, "operations:restore") ctx, end := diagnostics.Span(ctx, "operations:restore")
defer end() defer end()

View File

@ -50,14 +50,14 @@ func writeItem(ctx context.Context, item Item, folder string) error {
name := item.Name name := item.Name
fpath := filepath.Join(folder, name) fpath := filepath.Join(folder, name)
progReader, pclose := observe.ItemSpinner( progReader := observe.ItemSpinner(
ctx, ctx,
item.Body, item.Body,
observe.ItemExportMsg, observe.ItemExportMsg,
clues.Hide(name)) clues.Hide(name))
defer item.Body.Close() defer item.Body.Close()
defer pclose() defer progReader.Close()
err := os.MkdirAll(folder, os.ModePerm) err := os.MkdirAll(folder, os.ModePerm)
if err != nil { if err != nil {

View File

@ -90,8 +90,8 @@ func connectToM365(
return ctrl, nil return ctrl, nil
} }
progressBar := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Connecting to M365") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Connecting to M365")
defer close(progressBar) defer close(progressMessage)
ctrl, err := m365.NewController( ctrl, err := m365.NewController(
ctx, ctx,

View File

@ -191,8 +191,8 @@ func (r *repository) Connect(ctx context.Context, cfg ConnConfig) (err error) {
return clues.Stack(err) return clues.Stack(err)
} }
progressBar := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Connecting to repository") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Connecting to repository")
defer close(progressBar) defer close(progressMessage)
if err := r.setupKopia(ctx, ctrlRepo.Retention{}, false); err != nil { if err := r.setupKopia(ctx, ctrlRepo.Retention{}, false); err != nil {
return clues.Stack(err) return clues.Stack(err)
@ -214,8 +214,8 @@ func (r *repository) UpdatePassword(ctx context.Context, password string) (err e
} }
}() }()
progressBar := observe.MessageWithCompletion(ctx, observe.ProgressCfg{}, "Connecting to repository") progressMessage := observe.MessageWithCompletion(ctx, observe.DefaultCfg(), "Connecting to repository")
defer close(progressBar) defer close(progressMessage)
repoNameHash, err := r.GenerateHashForRepositoryConfigFileName() repoNameHash, err := r.GenerateHashForRepositoryConfigFileName()
if err != nil { if err != nil {