minor logging and cli output tweaks (#5016)

Just touch-ups for supportability and tracking, plus a UX tweak here and there. No real logic changes.

---

#### Does this PR need a docs update or release note?

- [x]  No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Test Plan

- [x]  Unit test
- [x] 💚 E2E
Commit dab53502ae (parent 8c2d7c3c18), authored by Keepers on 2024-01-12 15:25:07 -07:00, committed by GitHub
7 changed files with 57 additions and 32 deletions

View File

@@ -190,6 +190,8 @@ func genericCreateCommand(
 			ictx = clues.Add(ctx, "resource_owner_selected", owner)
 		)
 
+		logger.Ctx(ictx).Infof("setting up backup")
+
 		bo, err := r.NewBackupWithLookup(ictx, discSel, ins)
 		if err != nil {
 			cerr := clues.WrapWC(ictx, err, owner)
@@ -206,21 +208,23 @@ func genericCreateCommand(
 		}
 
 		ictx = clues.Add(
-			ctx,
+			ictx,
 			"resource_owner_id", bo.ResourceOwner.ID(),
-			"resource_owner_name", bo.ResourceOwner.Name())
+			"resource_owner_name", clues.Hide(bo.ResourceOwner.Name()))
+
+		logger.Ctx(ictx).Infof("running backup")
 
 		err = bo.Run(ictx)
 		if err != nil {
 			if errors.Is(err, core.ErrServiceNotEnabled) {
-				logger.Ctx(ctx).Infow("service not enabled",
+				logger.Ctx(ictx).Infow("service not enabled",
 					"resource_owner_id", bo.ResourceOwner.ID(),
 					"service", serviceName)
 
 				continue
 			}
 
-			cerr := clues.WrapWC(ictx, err, owner)
+			cerr := clues.Wrap(err, owner)
 			errs = append(errs, cerr)
 
 			meta, err := json.Marshal(cerr.Core().Values)
@@ -236,10 +240,10 @@ func genericCreateCommand(
 		bIDs = append(bIDs, string(bo.Results.BackupID))
 
 		if !DisplayJSONFormat() {
-			Infof(ctx, fmt.Sprintf("Backup complete %s %s", observe.Bullet, color.BlueOutput(bo.Results.BackupID)))
-			printBackupStats(ctx, r, string(bo.Results.BackupID))
+			Infof(ictx, fmt.Sprintf("Backup complete %s %s", observe.Bullet, color.BlueOutput(bo.Results.BackupID)))
+			printBackupStats(ictx, r, string(bo.Results.BackupID))
 		} else {
-			Infof(ctx, "Backup complete - ID: %v\n", bo.Results.BackupID)
+			Infof(ictx, "Backup complete - ID: %v\n", bo.Results.BackupID)
 		}
 	}
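
The hunks above thread the enriched `ictx` through the later logging and CLI output calls, and wrap the resource owner's display name in `clues.Hide` so it is concealed rather than logged in plain text. Below is a minimal sketch of that pattern, assuming the public `github.com/alcionai/clues` API (`Add`, `Hide`) used in the diff, with `go.uber.org/zap` standing in for corso's internal `logger.Ctx` helper; the owner values are made up.

```go
package main

import (
	"context"

	"github.com/alcionai/clues"
	"go.uber.org/zap"
)

func runBackup(ctx context.Context, log *zap.SugaredLogger, ownerID, ownerName string) {
	// Attach identifiers to the context so downstream errors and logs can
	// reuse them; the display name is wrapped so it gets concealed.
	ictx := clues.Add(
		ctx,
		"resource_owner_id", ownerID,
		"resource_owner_name", clues.Hide(ownerName))

	// corso's logger.Ctx(ictx) would pick these values up off the context;
	// here the concealed name is passed to the logger explicitly.
	log.Infow("running backup", "resource_owner_name", clues.Hide(ownerName))

	_ = ictx // handed to the backup operation in the real code
}

func main() {
	zl, _ := zap.NewDevelopment()
	runBackup(context.Background(), zl.Sugar(), "owner-id-1234", "Jane Example")
}
```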

View File

@@ -70,10 +70,13 @@ func (mc *mergeCollection) Items(
 	go func() {
 		defer close(res)
 
-		logger.Ctx(ctx).Infow(
-			"getting items for merged collection",
+		log := logger.Ctx(ctx)
+		log.Infow(
+			"streaming all merged collections' items",
 			"merged_collection_count", len(mc.cols))
 
+		defer log.Debug("done streaming all merged collections' items")
+
 		for _, c := range mc.cols {
 			// Unfortunately doesn't seem to be a way right now to see if the
 			// iteration failed and we should be exiting early.
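
The change above pairs a start log (carrying the collection count) with a deferred completion log, so a stream that exits early still leaves a matching "done" entry. A rough sketch of the same pattern, using the standard library's `log/slog` in place of corso's logger package and simplified stand-ins for the collection and item types:

```go
package main

import "log/slog"

// streamItems fans all collections' items into a single channel, logging
// once at the start and once (via defer) when the goroutine finishes,
// even if it bails out early.
func streamItems(cols [][]string) <-chan string {
	res := make(chan string)

	go func() {
		defer close(res)

		log := slog.Default()
		log.Info("streaming all merged collections' items",
			"merged_collection_count", len(cols))
		defer log.Debug("done streaming all merged collections' items")

		for _, c := range cols {
			for _, item := range c {
				res <- item
			}
		}
	}()

	return res
}

func main() {
	for item := range streamItems([][]string{{"a", "b"}, {"c"}}) {
		slog.Info("received item", "item", item)
	}
}
```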

View File

@@ -22,7 +22,7 @@ import (
 const (
 	hideProgressBarsFN   = "hide-progress"
 	retainProgressBarsFN = "retain-progress"
-	progressBarWidth     = 40
+	progressBarWidth     = 32
 )
 
 func init() {
@@ -117,6 +117,10 @@ func (o *observer) resetWriter(ctx context.Context) {
 	// 	opts = append(opts, mpb.WithAutoRefresh())
 	// }
 
+	if o.mp != nil {
+		logger.Ctx(ctx).Infow("resetting progress bars", "bar_count", o.mp.BarCount())
+	}
+
 	o.mp = mpb.NewWithContext(ctx, opts...)
 }
@@ -154,7 +158,12 @@ func Flush(ctx context.Context) {
 	obs := getObserver(ctx)
 
 	if obs.mp != nil {
+		logr := logger.Ctx(ctx)
+
+		logr.Infow("blocked flushing progress bars", "bar_count", obs.mp.BarCount())
 		obs.mp.Wait()
+
+		logr.Infow("progress bars flushed")
 	}
 
 	obs.resetWriter(ctx)
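
mpb's `Wait` blocks until every registered bar completes, so the new logs bracket the call and record the bar count, making a stalled flush visible in the logs. A standalone sketch of that flush pattern, assuming the `github.com/vbauerster/mpb/v8` module that the observe package wraps; the bar total and sleep are arbitrary:

```go
package main

import (
	"log/slog"
	"time"

	"github.com/vbauerster/mpb/v8"
)

func main() {
	// Narrower bars (the PR drops progressBarWidth from 40 to 32) leave
	// more room for bar labels in slim terminals.
	mp := mpb.New(mpb.WithWidth(32))

	bar := mp.AddBar(10)
	for i := 0; i < 10; i++ {
		bar.Increment()
		time.Sleep(10 * time.Millisecond)
	}

	slog.Info("blocked flushing progress bars", "bar_count", mp.BarCount())
	mp.Wait() // blocks until every bar has completed and rendered
	slog.Info("progress bars flushed")
}
```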

View File

@@ -306,36 +306,42 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 	}
 
 	LogFaultErrors(ctx, op.Errors.Errors(), "running backup")
 
-	// -----
-	// Persistence
-	// -----
-
-	err = op.persistResults(startTime, &opStats, op.Counter)
+	op.doPersistence(ctx, &opStats, sstore, deets, startTime)
+	finalizeErrorHandling(ctx, op.Options, op.Errors, "running backup")
+
+	logger.Ctx(ctx).Infow(
+		"completed backup",
+		"results", op.Results,
+		"failure", op.Errors.Failure())
+
+	return op.Errors.Failure()
+}
+
+func (op *BackupOperation) doPersistence(
+	ctx context.Context,
+	opStats *backupStats,
+	detailsStore streamstore.Streamer,
+	deets *details.Builder,
+	start time.Time,
+) {
+	observe.Message(ctx, observe.ProgressCfg{}, "Finalizing storage")
+	err := op.persistResults(start, opStats, op.Counter)
 	if err != nil {
 		op.Errors.Fail(clues.Wrap(err, "persisting backup results"))
-		return op.Errors.Failure()
+		return
 	}
 
 	err = op.createBackupModels(
 		ctx,
-		sstore,
-		opStats,
+		detailsStore,
+		*opStats,
 		op.Results.BackupID,
 		op.BackupVersion,
 		deets.Details())
 	if err != nil {
 		op.Errors.Fail(clues.Wrap(err, "persisting backup models"))
-		return op.Errors.Failure()
 	}
-
-	finalizeErrorHandling(ctx, op.Options, op.Errors, "running backup")
-
-	if op.Errors.Failure() == nil {
-		logger.Ctx(ctx).Infow("completed backup", "results", op.Results)
-	}
-
-	return op.Errors.Failure()
 }
 
 // do is purely the action of running a backup. All pre/post behavior
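
The refactor above pulls the persistence steps out of `Run` and into `doPersistence`, which records failures on the operation's error bus instead of returning them, leaving `Run` with a single place for `finalizeErrorHandling` and the completion log. A simplified sketch of that shape, with hypothetical stand-ins for the fault bus and the persistence calls:

```go
package main

import (
	"errors"
	"fmt"
)

// errBus is a minimal stand-in for corso's fault bus (op.Errors).
type errBus struct{ failure error }

func (e *errBus) Fail(err error) { e.failure = err }
func (e *errBus) Failure() error { return e.failure }

type operation struct{ errs errBus }

// doPersistence mirrors the extracted helper: failures are recorded on
// the bus and short-circuit the remaining persistence work, but nothing
// is returned to the caller.
func (op *operation) doPersistence() {
	if err := persistResults(); err != nil {
		op.errs.Fail(fmt.Errorf("persisting backup results: %w", err))
		return
	}

	if err := createBackupModels(); err != nil {
		op.errs.Fail(fmt.Errorf("persisting backup models: %w", err))
	}
}

// run keeps one exit path: persist, finalize, then report whatever
// failure accumulated on the bus.
func (op *operation) run() error {
	op.doPersistence()
	// finalizeErrorHandling and the "completed backup" log live here.
	return op.errs.Failure()
}

// Hypothetical persistence steps; the second one fails for illustration.
func persistResults() error     { return nil }
func createBackupModels() error { return errors.New("flaky storage") }

func main() {
	op := &operation{}
	fmt.Println("backup failure:", op.run())
}
```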

View File

@@ -357,9 +357,9 @@ func (bs backupStats) MinimumPrintable() any {
 // for printing out to a terminal in a columnar display.
 func (bs backupStats) Headers(skipID bool) []string {
 	headers := []string{
-		"Bytes Uploaded",
-		"Items Uploaded",
-		"Items Skipped",
+		"Bytes uploaded",
+		"Items uploaded",
+		"Items skipped",
 		"Errors",
 	}

View File

@@ -364,9 +364,9 @@ func (suite *BackupUnitSuite) TestStats_headersValues() {
 	expectHeaders := []string{
 		"ID",
-		"Bytes Uploaded",
-		"Items Uploaded",
-		"Items Skipped",
+		"Bytes uploaded",
+		"Items uploaded",
+		"Items skipped",
 		"Errors",
 	}

View File

@@ -31,6 +31,9 @@ var piiSafePathElems = pii.MapWithPlurals(
 	PagesCategory.String(),
 	DetailsCategory.String(),
 
+	// other internal values
+	"fault_error", // streamstore.FaultErrorType causes an import cycle
+
 	// well known folders
 	// https://learn.microsoft.com/en-us/graph/api/resources/mailfolder?view=graph-rest-1.0
 	"archive",