add logging around operation completion (#2337)
## Does this PR need a docs update or release note?

- [x] ⛔ No

## Type of change

- [x] 🧹 Tech Debt/Cleanup

## Issue(s)

* #2329

## Test Plan

- [x] 💪 Manual
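The shape of the change, pulled out of the diff below: each operation gains a thin `Run` wrapper that decorates the context, delegates to a private `do`, and logs both failure and completion. A minimal, self-contained sketch of that pattern; the zap setup, key/value pairs, and operation body are illustrative stand-ins, not corso's actual wiring:

```go
// Sketch of the Run/do split this PR applies to backup and restore
// operations. The clues calls mirror the diff; everything else is a stand-in.
package main

import (
	"context"

	"github.com/alcionai/clues"
	"go.uber.org/zap"
)

var log = zap.NewExample().Sugar()

// run owns the cross-cutting concerns: context decoration, failure logging,
// and completion logging. The real work lives in do.
func run(ctx context.Context) error {
	ctx = clues.AddAll(
		ctx,
		"backup_id", "bid-123", // illustrative values
		"service", "exchange")

	if err := do(ctx); err != nil {
		// attach both the error and the clues it carries to the log entry
		log.With("err", err).Errorw("backup operation", clues.InErr(err).Slice()...)
		return err
	}

	log.Infow("completed backup")

	return nil
}

func do(ctx context.Context) error {
	// errors produced inside the operation pick up the ctx decorations
	return clues.New("example failure").WithClues(ctx)
}

func main() {
	defer log.Sync()

	_ = run(context.Background())
}
```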
parent 4ac81e6253
commit ec7d3c6fc5
```diff
@@ -6,6 +6,7 @@ import (
 	"regexp"
 	"strings"
 
+	"github.com/alcionai/clues"
 	"github.com/spf13/cobra"
 
 	"github.com/alcionai/corso/src/cli/backup"
@@ -121,6 +122,9 @@ func Handle() {
 	}()
 
 	if err := corsoCmd.ExecuteContext(ctx); err != nil {
+		logger.Ctx(ctx).
+			With("err", err).
+			Errorw("cli execution", clues.InErr(err).Slice()...)
 		os.Exit(1)
 	}
 }
```
```diff
@@ -4,7 +4,7 @@ go 1.19
 
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
-	github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40
+	github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005
 	github.com/aws/aws-sdk-go v1.44.190
 	github.com/aws/aws-xray-sdk-go v1.8.0
 	github.com/google/uuid v1.3.0
```
```diff
@@ -52,8 +52,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40 h1:bvAwz0dcJeIyRjudVyzmmawOvc4SqlSerKd0B4dh0yw=
-github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
+github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 h1:eTgICcmcydEWG8J+hgnidf0pzujV3Gd2XqmknykZkzA=
+github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
```
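For reference, a pinned-commit bump like the go.mod/go.sum change above is normally produced by the Go toolchain rather than edited by hand; the ref below is the short commit hash embedded in the new pseudo-version:

```sh
go get github.com/alcionai/clues@cee86233b005
go mod tidy
```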
```diff
@@ -40,6 +40,9 @@ type BackupOperation struct {
 	Version string `json:"version"`
 
 	account account.Account
+
+	// when true, this allows for incremental backups instead of full data pulls
+	incremental bool
 }
 
 // BackupResults aggregate the details of the result of the operation.
@@ -66,6 +69,7 @@ func NewBackupOperation(
 		Selectors:   selector,
 		Version:     "v0",
 		account:     acct,
+		incremental: useIncrementalBackup(selector, opts),
 	}
 	if err := op.validate(); err != nil {
 		return BackupOperation{}, err
@@ -102,10 +106,36 @@ type detailsWriter interface {
 // ---------------------------------------------------------------------------
 
 // Run begins a synchronous backup operation.
-func (op *BackupOperation) Run(ctx context.Context) (err error) {
+func (op *BackupOperation) Run(ctx context.Context) error {
 	ctx, end := D.Span(ctx, "operations:backup:run")
-	defer end()
+	defer func() {
+		end()
+		// wait for the progress display to clean up
+		observe.Complete()
+	}()
+
+	ctx = clues.AddAll(
+		ctx,
+		"tenant_id", op.account.ID(), // TODO: pii
+		"resource_owner", op.ResourceOwner, // TODO: pii
+		"backup_id", op.Results.BackupID,
+		"service", op.Selectors.Service,
+		"incremental", op.incremental)
+
+	if err := op.do(ctx); err != nil {
+		logger.Ctx(ctx).
+			With("err", err).
+			Errorw("backup operation", clues.InErr(err).Slice()...)
+
+		return err
+	}
+
+	logger.Ctx(ctx).Infow("completed backup", "results", op.Results)
+
+	return nil
+}
+
+func (op *BackupOperation) do(ctx context.Context) (err error) {
 	var (
 		opStats       backupStats
 		backupDetails *details.Builder
@@ -114,19 +144,10 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 		startTime    = time.Now()
 		detailsStore = streamstore.New(op.kopia, tenantID, op.Selectors.PathService())
 		reasons      = selectorToReasons(op.Selectors)
-		uib          = useIncrementalBackup(op.Selectors, op.Options)
 	)
 
 	op.Results.BackupID = model.StableID(uuid.NewString())
 
-	ctx = clues.AddAll(
-		ctx,
-		"tenant_id", tenantID, // TODO: pii
-		"resource_owner", op.ResourceOwner, // TODO: pii
-		"backup_id", op.Results.BackupID,
-		"service", op.Selectors.Service,
-		"incremental", uib)
-
 	op.bus.Event(
 		ctx,
 		events.BackupStart,
@@ -139,9 +160,6 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 
 	// persist operation results to the model store on exit
 	defer func() {
-		// wait for the progress display to clean up
-		observe.Complete()
-
 		err = op.persistResults(startTime, &opStats)
 		if err != nil {
 			op.Errors.Fail(errors.Wrap(err, "persisting backup results"))
@@ -165,7 +183,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 		op.store,
 		reasons,
 		tenantID,
-		uib,
+		op.incremental,
 		op.Errors)
 	if err != nil {
 		op.Errors.Fail(errors.Wrap(err, "collecting manifest heuristics"))
@@ -190,7 +208,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 		return opStats.readErr
 	}
 
-	ctx = clues.Add(ctx, "collections", len(cs))
+	ctx = clues.Add(ctx, "coll_count", len(cs))
 
 	opStats.k, backupDetails, toMerge, err = consumeBackupDataCollections(
 		ctx,
@@ -200,7 +218,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 		mans,
 		cs,
 		op.Results.BackupID,
-		uib && canUseMetaData)
+		op.incremental && canUseMetaData)
 	if err != nil {
 		op.Errors.Fail(errors.Wrap(err, "backing up service data"))
 		opStats.writeErr = op.Errors.Err()
@@ -208,11 +226,6 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 		return opStats.writeErr
 	}
 
-	logger.Ctx(ctx).Debugf(
-		"Backed up %d directories and %d files",
-		opStats.k.TotalDirectoryCount, opStats.k.TotalFileCount,
-	)
-
 	if err = mergeDetails(
 		ctx,
 		op.store,
@@ -334,7 +347,7 @@ func builderFromReason(ctx context.Context, tenant string, r kopia.Reason) (*path.Builder, error) {
 		false,
 	)
 	if err != nil {
-		return nil, clues.Wrap(err, "building path").WithMap(clues.Values(ctx))
+		return nil, clues.Wrap(err, "building path").WithClues(ctx)
 	}
 
 	return p.ToBuilder().Dir(), nil
@@ -404,13 +417,9 @@ func consumeBackupDataCollections(
 
 		logger.Ctx(ctx).Infow(
 			"using base for backup",
-			"snapshot_id",
-			m.ID,
-			"services",
-			svcs,
-			"categories",
-			cats,
-		)
+			"snapshot_id", m.ID,
+			"services", svcs,
+			"categories", cats)
 	}
 
 	kopiaStats, deets, itemsSourcedFromBase, err := bu.BackupCollections(
@@ -481,7 +490,7 @@ func mergeDetails(
 
 	bID, ok := man.GetTag(kopia.TagBackupID)
 	if !ok {
-		return clues.New("no backup ID in snapshot manifest").WithMap(clues.Values(mctx))
+		return clues.New("no backup ID in snapshot manifest").WithClues(mctx)
 	}
 
 	mctx = clues.Add(mctx, "manifest_backup_id", bID)
@@ -493,14 +502,14 @@ func mergeDetails(
 		detailsStore,
 	)
 	if err != nil {
-		return clues.New("fetching base details for backup").WithMap(clues.Values(mctx))
+		return clues.New("fetching base details for backup").WithClues(mctx)
 	}
 
 	for _, entry := range baseDeets.Items() {
 		rr, err := path.FromDataLayerPath(entry.RepoRef, true)
 		if err != nil {
 			return clues.New("parsing base item info path").
-				WithMap(clues.Values(mctx)).
+				WithClues(mctx).
 				With("repo_ref", entry.RepoRef) // todo: pii
 		}
 
@@ -524,7 +533,7 @@ func mergeDetails(
 		// Fixup paths in the item.
 		item := entry.ItemInfo
 		if err := details.UpdateItem(&item, newPath); err != nil {
-			return clues.New("updating item details").WithMap(clues.Values(mctx))
+			return clues.New("updating item details").WithClues(mctx)
 		}
 
 		// TODO(ashmrtn): This may need updated if we start using this merge
@@ -550,7 +559,7 @@ func mergeDetails(
 
 	if addedEntries != len(shortRefsFromPrevBackup) {
 		return clues.New("incomplete migration of backup details").
-			WithMap(clues.Values(ctx)).
+			WithClues(ctx).
 			WithAll("item_count", addedEntries, "expected_item_count", len(shortRefsFromPrevBackup))
 	}
 
@@ -603,12 +612,12 @@ func (op *BackupOperation) createBackupModels(
 	ctx = clues.Add(ctx, "snapshot_id", snapID)
 
 	if backupDetails == nil {
-		return clues.New("no backup details to record").WithMap(clues.Values(ctx))
+		return clues.New("no backup details to record").WithClues(ctx)
 	}
 
 	detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails)
 	if err != nil {
-		return clues.Wrap(err, "creating backupDetails model").WithMap(clues.Values(ctx))
+		return clues.Wrap(err, "creating backupDetails model").WithClues(ctx)
 	}
 
 	ctx = clues.Add(ctx, "details_id", detailsID)
@@ -622,7 +631,7 @@ func (op *BackupOperation) createBackupModels(
 	)
 
 	if err = op.store.Put(ctx, model.BackupSchema, b); err != nil {
-		return clues.Wrap(err, "creating backup model").WithMap(clues.Values(ctx))
+		return clues.Wrap(err, "creating backup model").WithClues(ctx)
 	}
 
 	dur := op.Results.CompletedAt.Sub(op.Results.StartedAt)
```
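Most of the error-wrapping churn above is a mechanical swap of `.WithMap(clues.Values(ctx))` for the newer `.WithClues(ctx)` helper. A small sketch of the two spellings side by side, assuming both helpers remain available at the pinned clues version:

```go
package main

import (
	"context"
	"fmt"

	"github.com/alcionai/clues"
)

func main() {
	ctx := clues.Add(context.Background(), "backup_id", "bid-123")

	// older spelling: manually copy the ctx values onto the error
	errOld := clues.New("no backup details to record").WithMap(clues.Values(ctx))

	// newer spelling used throughout this diff
	errNew := clues.New("no backup details to record").WithClues(ctx)

	// both errors carry the same key/value payload
	fmt.Println(clues.InErr(errOld).Slice())
	fmt.Println(clues.InErr(errNew).Slice())
}
```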
```diff
@@ -73,7 +73,7 @@ func produceManifestsAndMetadata(
 	if err := verifyDistinctBases(ctx, ms, errs); err != nil {
 		logger.Ctx(ctx).With("error", err).Infow(
 			"base snapshot collision, falling back to full backup",
-			clues.Slice(ctx)...)
+			clues.In(ctx).Slice()...)
 
 		return ms, nil, false, nil
 	}
@@ -87,7 +87,7 @@ func produceManifestsAndMetadata(
 
 		bID, ok := man.GetTag(kopia.TagBackupID)
 		if !ok {
-			err = clues.New("snapshot manifest missing backup ID").WithMap(clues.Values(mctx))
+			err = clues.New("snapshot manifest missing backup ID").WithClues(ctx)
 			return nil, nil, false, err
 		}
 
@@ -98,7 +98,7 @@ func produceManifestsAndMetadata(
 		// if no backup exists for any of the complete manifests, we want
 		// to fall back to a complete backup.
 		if errors.Is(err, kopia.ErrNotFound) {
-			logger.Ctx(ctx).Infow("backup missing, falling back to full backup", clues.Slice(mctx)...)
+			logger.Ctx(ctx).Infow("backup missing, falling back to full backup", clues.In(mctx).Slice()...)
 			return ms, nil, false, nil
 		}
 
@@ -113,7 +113,7 @@ func produceManifestsAndMetadata(
 		// This makes an assumption that the ID points to a populated set of
 		// details; we aren't doing the work to look them up.
 		if len(dID) == 0 {
-			logger.Ctx(ctx).Infow("backup missing details ID, falling back to full backup", clues.Slice(mctx)...)
+			logger.Ctx(ctx).Infow("backup missing details ID, falling back to full backup", clues.In(mctx).Slice()...)
 			return ms, nil, false, nil
 		}
 
@@ -159,7 +159,7 @@ func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs
 			failed = true
 
 			errs.Add(clues.New("manifests have overlapping reasons").
-				WithMap(clues.Values(ctx)).
+				WithClues(ctx).
 				With("other_manifest_id", b))
 
 			continue
@@ -170,7 +170,7 @@ func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs
 	}
 
 	if failed {
-		return clues.New("multiple base snapshots qualify").WithMap(clues.Values(ctx))
+		return clues.New("multiple base snapshots qualify").WithClues(ctx)
 	}
 
 	return nil
```
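The manifest changes keep the existing degrade-instead-of-fail behavior, but flatten the context values into the log fields via `clues.In(ctx).Slice()`. A rough, self-contained sketch of that fallback shape; the verification function and logger here are stand-ins:

```go
package main

import (
	"context"
	"fmt"

	"github.com/alcionai/clues"
	"go.uber.org/zap"
)

// stand-in for kopia base-manifest verification
func verifyDistinctBases(ctx context.Context) error {
	return clues.New("manifests have overlapping reasons").WithClues(ctx)
}

func main() {
	log := zap.NewExample().Sugar()
	ctx := clues.Add(context.Background(), "service", "exchange")

	if err := verifyDistinctBases(ctx); err != nil {
		// log the reason with the ctx clues attached, then degrade instead of failing
		log.With("error", err).Infow(
			"base snapshot collision, falling back to full backup",
			clues.In(ctx).Slice()...)
	}

	fmt.Println("continuing with a full (non-incremental) backup")
}
```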
```diff
@@ -107,8 +107,33 @@ type restorer interface {
 // Run begins a synchronous restore operation.
 func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) {
 	ctx, end := D.Span(ctx, "operations:restore:run")
-	defer end()
+	defer func() {
+		end()
+		// wait for the progress display to clean up
+		observe.Complete()
+	}()
+
+	ctx = clues.AddAll(
+		ctx,
+		"tenant_id", op.account.ID(), // TODO: pii
+		"backup_id", op.BackupID,
+		"service", op.Selectors.Service)
+
+	deets, err := op.do(ctx)
+	if err != nil {
+		logger.Ctx(ctx).
+			With("err", err).
+			Errorw("restore operation", clues.InErr(err).Slice()...)
+
+		return nil, err
+	}
+
+	logger.Ctx(ctx).Infow("completed restore", "results", op.Results)
+
+	return deets, nil
+}
 
+func (op *RestoreOperation) do(ctx context.Context) (restoreDetails *details.Details, err error) {
 	var (
 		opStats = restoreStats{
 			bytesRead: &stats.ByteCounter{},
@@ -118,9 +143,6 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) {
 	)
 
 	defer func() {
-		// wait for the progress display to clean up
-		observe.Complete()
-
 		err = op.persistResults(ctx, startTime, &opStats)
 		if err != nil {
 			return
@@ -129,12 +151,6 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) {
 
 	detailsStore := streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService())
 
-	ctx = clues.AddAll(
-		ctx,
-		"tenant_id", op.account.ID(), // TODO: pii
-		"backup_id", op.BackupID,
-		"service", op.Selectors.Service)
-
 	bup, deets, err := getBackupAndDetailsFromID(
 		ctx,
 		op.BackupID,
@@ -166,7 +182,6 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) {
 	}
 
 	ctx = clues.Add(ctx, "details_paths", len(paths))
-
 	observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID)))
 
 	kopiaComplete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Enumerating items in repository"))
@@ -180,8 +195,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) {
 	}
 	kopiaComplete <- struct{}{}
 
-	ctx = clues.Add(ctx, "collections", len(dcs))
-
+	ctx = clues.Add(ctx, "coll_count", len(dcs))
 	opStats.cs = dcs
 	opStats.resourceCount = len(data.ResourceOwnerSet(dcs))
 
```
```diff
@@ -267,7 +267,7 @@ func Ctx(ctx context.Context) *zap.SugaredLogger {
 		return singleton(levelOf(llFlag), defaultLogLocation())
 	}
 
-	return l.(*zap.SugaredLogger).With(clues.Slice(ctx)...)
+	return l.(*zap.SugaredLogger).With(clues.In(ctx).Slice()...)
}
 
 // transforms the llevel flag value to a logLevel enum
```
```diff
@@ -2,11 +2,12 @@ package repository
 
 import (
 	"context"
-	"errors"
 	"time"
 
+	"github.com/alcionai/clues"
 	"github.com/google/uuid"
 	"github.com/hashicorp/go-multierror"
 	"github.com/pkg/errors"
 
 	"github.com/alcionai/corso/src/internal/events"
 	"github.com/alcionai/corso/src/internal/kopia"
@@ -88,6 +89,8 @@ func Initialize(
 	s storage.Storage,
 	opts control.Options,
 ) (Repository, error) {
+	ctx = clues.AddAll(ctx, "acct_provider", acct.Provider, "storage_provider", s.Provider)
+
 	kopiaRef := kopia.NewConn(s)
 	if err := kopiaRef.Initialize(ctx); err != nil {
 		// replace common internal errors so that sdk users can check results with errors.Is()
@@ -95,7 +98,7 @@ func Initialize(
 			return nil, ErrorRepoAlreadyExists
 		}
 
-		return nil, err
+		return nil, errors.Wrap(err, "initializing kopia")
 	}
 	// kopiaRef comes with a count of 1 and NewWrapper/NewModelStore bumps it again so safe
 	// to close here.
@@ -103,17 +106,17 @@ func Initialize(
 
 	w, err := kopia.NewWrapper(kopiaRef)
 	if err != nil {
-		return nil, err
+		return nil, clues.Stack(err).WithClues(ctx)
 	}
 
 	ms, err := kopia.NewModelStore(kopiaRef)
 	if err != nil {
-		return nil, err
+		return nil, clues.Stack(err).WithClues(ctx)
 	}
 
 	bus, err := events.NewBus(ctx, s, acct.ID(), opts)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "constructing event bus")
 	}
 
 	repoID := newRepoID(s)
@@ -131,7 +134,7 @@ func Initialize(
 	}
 
 	if err := newRepoModel(ctx, ms, r.ID); err != nil {
-		return nil, errors.New("setting up repository")
+		return nil, clues.New("setting up repository").WithClues(ctx)
 	}
 
 	r.Bus.Event(ctx, events.RepoInit, nil)
@@ -150,6 +153,8 @@ func Connect(
 	s storage.Storage,
 	opts control.Options,
 ) (Repository, error) {
+	ctx = clues.AddAll(ctx, "acct_provider", acct.Provider, "storage_provider", s.Provider)
+
 	// Close/Reset the progress bar. This ensures callers don't have to worry about
 	// their output getting clobbered (#1720)
 	defer observe.Complete()
@@ -160,7 +165,7 @@ func Connect(
 
 	kopiaRef := kopia.NewConn(s)
 	if err := kopiaRef.Connect(ctx); err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "connecting kopia client")
 	}
 	// kopiaRef comes with a count of 1 and NewWrapper/NewModelStore bumps it again so safe
 	// to close here.
@@ -168,17 +173,17 @@ func Connect(
 
 	w, err := kopia.NewWrapper(kopiaRef)
 	if err != nil {
-		return nil, err
+		return nil, clues.Stack(err).WithClues(ctx)
 	}
 
 	ms, err := kopia.NewModelStore(kopiaRef)
 	if err != nil {
-		return nil, err
+		return nil, clues.Stack(err).WithClues(ctx)
 	}
 
 	bus, err := events.NewBus(ctx, s, acct.ID(), opts)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "constructing event bus")
 	}
 
 	rm, err := getRepoModel(ctx, ms)
```
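The repository changes wrap kopia and event-bus failures instead of returning them bare. With `github.com/pkg/errors`, sentinel checks via `errors.Is` keep working through the wrap, which is what the "sdk users can check results with errors.Is()" comment relies on. A small sketch with a made-up sentinel:

```go
package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

// stand-in for a sentinel such as ErrorRepoAlreadyExists
var errRepoAlreadyExists = errors.New("a repository was already initialized")

func initialize() error {
	// wrap to add call-site context without hiding the sentinel
	return pkgerrors.Wrap(errRepoAlreadyExists, "initializing kopia")
}

func main() {
	err := initialize()

	// pkg/errors' wrapped errors implement Unwrap, so errors.Is still matches
	fmt.Println(errors.Is(err, errRepoAlreadyExists)) // true
	fmt.Println(err)                                  // initializing kopia: a repository was already initialized
}
```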