diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go
index c95d5803d..9ac27640f 100644
--- a/src/cli/backup/backup.go
+++ b/src/cli/backup/backup.go
@@ -190,6 +190,8 @@ func genericCreateCommand(
 			ictx = clues.Add(ctx, "resource_owner_selected", owner)
 		)
 
+		logger.Ctx(ictx).Infof("setting up backup")
+
 		bo, err := r.NewBackupWithLookup(ictx, discSel, ins)
 		if err != nil {
 			cerr := clues.WrapWC(ictx, err, owner)
@@ -206,21 +208,23 @@
 		}
 
 		ictx = clues.Add(
-			ctx,
+			ictx,
 			"resource_owner_id", bo.ResourceOwner.ID(),
-			"resource_owner_name", bo.ResourceOwner.Name())
+			"resource_owner_name", clues.Hide(bo.ResourceOwner.Name()))
+
+		logger.Ctx(ictx).Infof("running backup")
 
 		err = bo.Run(ictx)
 		if err != nil {
 			if errors.Is(err, core.ErrServiceNotEnabled) {
-				logger.Ctx(ctx).Infow("service not enabled",
+				logger.Ctx(ictx).Infow("service not enabled",
 					"resource_owner_id", bo.ResourceOwner.ID(),
 					"service", serviceName)
 
 				continue
 			}
 
-			cerr := clues.WrapWC(ictx, err, owner)
+			cerr := clues.Wrap(err, owner)
 			errs = append(errs, cerr)
 
 			meta, err := json.Marshal(cerr.Core().Values)
@@ -236,10 +240,10 @@
 		bIDs = append(bIDs, string(bo.Results.BackupID))
 
 		if !DisplayJSONFormat() {
-			Infof(ctx, fmt.Sprintf("Backup complete %s %s", observe.Bullet, color.BlueOutput(bo.Results.BackupID)))
-			printBackupStats(ctx, r, string(bo.Results.BackupID))
+			Infof(ictx, fmt.Sprintf("Backup complete %s %s", observe.Bullet, color.BlueOutput(bo.Results.BackupID)))
+			printBackupStats(ictx, r, string(bo.Results.BackupID))
 		} else {
-			Infof(ctx, "Backup complete - ID: %v\n", bo.Results.BackupID)
+			Infof(ictx, "Backup complete - ID: %v\n", bo.Results.BackupID)
 		}
 	}
 
diff --git a/src/internal/kopia/merge_collection.go b/src/internal/kopia/merge_collection.go
index 6b969f39c..f4f750c79 100644
--- a/src/internal/kopia/merge_collection.go
+++ b/src/internal/kopia/merge_collection.go
@@ -70,10 +70,13 @@ func (mc *mergeCollection) Items(
 	go func() {
 		defer close(res)
 
-		logger.Ctx(ctx).Infow(
-			"getting items for merged collection",
+		log := logger.Ctx(ctx)
+		log.Infow(
+			"streaming all merged collections' items",
 			"merged_collection_count", len(mc.cols))
 
+		defer log.Debug("done streaming all merged collections' items")
+
 		for _, c := range mc.cols {
 			// Unfortunately doesn't seem to be a way right now to see if the
 			// iteration failed and we should be exiting early.
diff --git a/src/internal/observe/observe.go b/src/internal/observe/observe.go
index f69deaffe..1a4c73a14 100644
--- a/src/internal/observe/observe.go
+++ b/src/internal/observe/observe.go
@@ -22,7 +22,7 @@ import (
 const (
 	hideProgressBarsFN   = "hide-progress"
 	retainProgressBarsFN = "retain-progress"
-	progressBarWidth     = 40
+	progressBarWidth     = 32
 )
 
 func init() {
@@ -117,6 +117,10 @@ func (o *observer) resetWriter(ctx context.Context) {
 	// 	opts = append(opts, mpb.WithAutoRefresh())
 	// }
 
+	if o.mp != nil {
+		logger.Ctx(ctx).Infow("resetting progress bars", "bar_count", o.mp.BarCount())
+	}
+
 	o.mp = mpb.NewWithContext(ctx, opts...)
 }
 
@@ -154,7 +158,12 @@ func Flush(ctx context.Context) {
 	obs := getObserver(ctx)
 
 	if obs.mp != nil {
+		logr := logger.Ctx(ctx)
+		logr.Infow("blocked flushing progress bars", "bar_count", obs.mp.BarCount())
+
 		obs.mp.Wait()
+
+		logr.Infow("progress bars flushed")
 	}
 
 	obs.resetWriter(ctx)
diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go
index a4a1508a6..b800fc1f2 100644
--- a/src/internal/operations/backup.go
+++ b/src/internal/operations/backup.go
@@ -306,36 +306,42 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 	}
 
 	LogFaultErrors(ctx, op.Errors.Errors(), "running backup")
+	op.doPersistence(ctx, &opStats, sstore, deets, startTime)
+	finalizeErrorHandling(ctx, op.Options, op.Errors, "running backup")
 
-	// -----
-	// Persistence
-	// -----
+	logger.Ctx(ctx).Infow(
+		"completed backup",
+		"results", op.Results,
+		"failure", op.Errors.Failure())
 
-	err = op.persistResults(startTime, &opStats, op.Counter)
+	return op.Errors.Failure()
+}
+
+func (op *BackupOperation) doPersistence(
+	ctx context.Context,
+	opStats *backupStats,
+	detailsStore streamstore.Streamer,
+	deets *details.Builder,
+	start time.Time,
+) {
+	observe.Message(ctx, observe.ProgressCfg{}, "Finalizing storage")
+
+	err := op.persistResults(start, opStats, op.Counter)
 	if err != nil {
 		op.Errors.Fail(clues.Wrap(err, "persisting backup results"))
-		return op.Errors.Failure()
+		return
 	}
 
 	err = op.createBackupModels(
 		ctx,
-		sstore,
-		opStats,
+		detailsStore,
+		*opStats,
 		op.Results.BackupID,
 		op.BackupVersion,
 		deets.Details())
 	if err != nil {
 		op.Errors.Fail(clues.Wrap(err, "persisting backup models"))
-		return op.Errors.Failure()
 	}
-
-	finalizeErrorHandling(ctx, op.Options, op.Errors, "running backup")
-
-	if op.Errors.Failure() == nil {
-		logger.Ctx(ctx).Infow("completed backup", "results", op.Results)
-	}
-
-	return op.Errors.Failure()
 }
 
 // do is purely the action of running a backup. All pre/post behavior
diff --git a/src/pkg/backup/backup.go b/src/pkg/backup/backup.go
index ae8206069..ff1dfdd93 100644
--- a/src/pkg/backup/backup.go
+++ b/src/pkg/backup/backup.go
@@ -357,9 +357,9 @@ func (bs backupStats) MinimumPrintable() any {
 // for printing out to a terminal in a columnar display.
 func (bs backupStats) Headers(skipID bool) []string {
 	headers := []string{
-		"Bytes Uploaded",
-		"Items Uploaded",
-		"Items Skipped",
+		"Bytes uploaded",
+		"Items uploaded",
+		"Items skipped",
 		"Errors",
 	}
 
diff --git a/src/pkg/backup/backup_test.go b/src/pkg/backup/backup_test.go
index cfc9847cf..b040e5d28 100644
--- a/src/pkg/backup/backup_test.go
+++ b/src/pkg/backup/backup_test.go
@@ -364,9 +364,9 @@ func (suite *BackupUnitSuite) TestStats_headersValues() {
 
 	expectHeaders := []string{
 		"ID",
-		"Bytes Uploaded",
-		"Items Uploaded",
-		"Items Skipped",
+		"Bytes uploaded",
+		"Items uploaded",
+		"Items skipped",
 		"Errors",
 	}
 
diff --git a/src/pkg/path/elements.go b/src/pkg/path/elements.go
index 38c0ec1df..141769f22 100644
--- a/src/pkg/path/elements.go
+++ b/src/pkg/path/elements.go
@@ -31,6 +31,9 @@ var piiSafePathElems = pii.MapWithPlurals(
 	PagesCategory.String(),
 	DetailsCategory.String(),
 
+	// other internal values
+	"fault_error", // streamstore.FaultErrorType causes an import cycle
+
 	// well known folders
 	// https://learn.microsoft.com/en-us/graph/api/resources/mailfolder?view=graph-rest-1.0
 	"archive",