additional logging when creating backup model (#2916)

#### Type of change

- [x] 🤖 Supportability/Tests

#### Issue(s)

* #2913
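
The diffs below largely repeat one pattern: values that were previously formatted into log messages or `errors.Wrapf` strings are attached to the context once via `clues.Add`, then picked up automatically by `logger.Ctx`/`logger.CtxErr` and by errors wrapped with `clues.Wrap(...).WithClues(ctx)`. A minimal sketch of that pattern (the `persistBackup` and `put` names, the `example` package, and the import paths are illustrative assumptions, not code from this PR):

```go
package example

import (
	"context"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/pkg/logger"
)

// persistBackup is a hypothetical stand-in for createBackupModels; it only
// demonstrates the ctx-first logging style this change applies.
func persistBackup(ctx context.Context, snapID string, detailCount int) error {
	// Attach identifying values to the context once; every log line and
	// wrapped error below carries them automatically.
	ctx = clues.Add(ctx, "snapshot_id", snapID, "details_entry_count", detailCount)

	logger.Ctx(ctx).Info("creating new backup")

	if err := put(ctx); err != nil {
		// WithClues(ctx) copies the context values onto the returned error.
		return clues.Wrap(err, "creating backup model").WithClues(ctx)
	}

	return nil
}

// put stands in for op.store.Put; it is not part of this PR.
func put(ctx context.Context) error { return nil }
```

The payoff shows up in the branching log calls below (e.g. the "no base snapshots ..." cases): the message stays short while the counts and IDs still land on the structured output, and the same values travel with any error returned from the call.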
Keepers 2023-03-22 14:00:33 -06:00 committed by GitHub
parent 532c5c017b
commit e77a5e5a7e
4 changed files with 43 additions and 30 deletions

View File

@ -51,10 +51,7 @@ func filterContainersAndFillCollections(
tombstones = makeTombstones(dps)
)
-logger.Ctx(ctx).Infow(
-"filling collections",
-"metadata_count",
-len(dps))
+logger.Ctx(ctx).Infow("filling collections", "len_deltapaths", len(dps))
// TODO(rkeepers): this should be passed in from the caller, probably
// as an interface that satisfies the NewCollection requirements.

View File

@ -960,14 +960,16 @@ func inflateBaseTree(
return nil
}
ctx = clues.Add(ctx, "snapshot_base_id", snap.ID)
root, err := loader.SnapshotRoot(snap.Manifest)
if err != nil {
return errors.Wrapf(err, "getting snapshot %s root directory", snap.ID)
return clues.Wrap(err, "getting snapshot root directory").WithClues(ctx)
}
dir, ok := root.(fs.Directory)
if !ok {
-return errors.Errorf("snapshot %s root is not a directory", snap.ID)
+return clues.New("snapshot root is not a directory").WithClues(ctx)
}
// For each subtree corresponding to the tuple
@ -976,27 +978,28 @@ func inflateBaseTree(
for _, subtreePath := range snap.SubtreePaths {
// We're starting from the root directory so don't need it in the path.
pathElems := encodeElements(subtreePath.PopFront().Elements()...)
+ictx := clues.Add(ctx, "subtree_path", subtreePath)
-ent, err := snapshotfs.GetNestedEntry(ctx, dir, pathElems)
+ent, err := snapshotfs.GetNestedEntry(ictx, dir, pathElems)
if err != nil {
if isErrEntryNotFound(err) {
logger.Ctx(ctx).Infow("base snapshot missing subtree", "error", err)
logger.CtxErr(ictx, err).Infow("base snapshot missing subtree")
continue
}
return errors.Wrapf(err, "snapshot %s getting subtree root", snap.ID)
return clues.Wrap(err, "getting subtree root").WithClues(ictx)
}
subtreeDir, ok := ent.(fs.Directory)
if !ok {
return errors.Wrapf(err, "snapshot %s subtree root is not directory", snap.ID)
return clues.Wrap(err, "subtree root is not directory").WithClues(ictx)
}
// We're assuming here that the prefix for the path has not changed (i.e.
// all of tenant, service, resource owner, and category are the same in the
// old snapshot (snap) and the snapshot we're currently trying to make).
if err = traverseBaseDir(
-ctx,
+ictx,
0,
updatedPaths,
subtreePath.Dir(),
@ -1004,7 +1007,7 @@ func inflateBaseTree(
subtreeDir,
roots,
); err != nil {
return errors.Wrapf(err, "traversing base snapshot %s", snap.ID)
return clues.Wrap(err, "traversing base snapshot").WithClues(ictx)
}
}
@ -1042,9 +1045,13 @@ func inflateDirTree(
baseIDs = append(baseIDs, snap.ID)
}
-logger.Ctx(ctx).Infow(
-"merging hierarchies from base snapshots",
-"snapshot_ids", baseIDs)
+ctx = clues.Add(ctx, "len_base_snapshots", len(baseSnaps), "base_snapshot_ids", baseIDs)
+if len(baseIDs) > 0 {
+logger.Ctx(ctx).Info("merging hierarchies from base snapshots")
+} else {
+logger.Ctx(ctx).Info("no base snapshots to merge")
+}
for _, snap := range baseSnaps {
if err = inflateBaseTree(ctx, loader, snap, updatedPaths, roots); err != nil {

View File

@ -202,7 +202,7 @@ func (w Wrapper) makeSnapshotWithRoot(
bc = &stats.ByteCounter{}
)
-snapIDs := make([]manifest.ID, 0, len(prevSnapEntries))
+snapIDs := make([]manifest.ID, 0, len(prevSnapEntries)) // just for logging
prevSnaps := make([]*snapshot.Manifest, 0, len(prevSnapEntries))
for _, ent := range prevSnapEntries {
@ -210,9 +210,17 @@ func (w Wrapper) makeSnapshotWithRoot(
snapIDs = append(snapIDs, ent.ID)
}
-logger.Ctx(ctx).Infow(
-"using snapshots for kopia-assisted incrementals",
-"snapshot_ids", snapIDs)
+ctx = clues.Add(
+ctx,
+"len_prev_base_snapshots", len(prevSnapEntries),
+"assist_snap_ids", snapIDs,
+"additional_tags", addlTags)
+if len(snapIDs) > 0 {
+logger.Ctx(ctx).Info("using snapshots for kopia-assisted incrementals")
+} else {
+logger.Ctx(ctx).Info("no base snapshots for kopia-assisted incrementals")
+}
tags := map[string]string{}

View File

@ -671,26 +671,28 @@ func (op *BackupOperation) createBackupModels(
sscw streamstore.CollectorWriter,
snapID string,
backupID model.StableID,
-backupDetails *details.Details,
+deets *details.Details,
) error {
ctx = clues.Add(ctx, "snapshot_id", snapID)
ctx = clues.Add(ctx, "snapshot_id", snapID, "backup_id", backupID)
// generate a new fault bus so that we can maintain clean
// separation between the errors we serialize and those that
// are generated during the serialization process.
errs := fault.New(true)
-if backupDetails == nil {
+if deets == nil {
return clues.New("no backup details to record").WithClues(ctx)
}
-err := sscw.Collect(ctx, streamstore.DetailsCollector(backupDetails))
+ctx = clues.Add(ctx, "details_entry_count", len(deets.Entries))
+err := sscw.Collect(ctx, streamstore.DetailsCollector(deets))
if err != nil {
return clues.Wrap(err, "creating backupDetails persistence").WithClues(ctx)
return clues.Wrap(err, "collecting details for persistence").WithClues(ctx)
}
err = sscw.Collect(ctx, streamstore.FaultErrorsCollector(op.Errors.Errors()))
if err != nil {
return clues.Wrap(err, "creating errors persistence").WithClues(ctx)
return clues.Wrap(err, "collecting errors for persistence").WithClues(ctx)
}
ssid, err := sscw.Write(ctx, errs)
@ -698,6 +700,8 @@ func (op *BackupOperation) createBackupModels(
return clues.Wrap(err, "persisting details and errors").WithClues(ctx)
}
ctx = clues.Add(ctx, "streamstore_snapshot_id", ssid)
b := backup.New(
snapID, ssid,
op.Status.String(),
@ -707,12 +711,9 @@ func (op *BackupOperation) createBackupModels(
op.Results.StartAndEndTime,
op.Errors.Errors())
-err = op.store.Put(
-clues.Add(ctx, "streamstore_snapshot_id", ssid),
-model.BackupSchema,
-b)
-if err != nil {
+logger.Ctx(ctx).Info("creating new backup")
+if err = op.store.Put(ctx, model.BackupSchema, b); err != nil {
return clues.Wrap(err, "creating backup model").WithClues(ctx)
}